2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
39 #include "mpegvideo.h"
47 #include "aandcttab.h"
49 #include "mpeg4video.h"
51 #include "bytestream.h"
/* Forward declarations for encoder-internal helpers defined later in this file. */
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default motion-vector penalty / fcode lookup tables shared by all encoder
 * instances; default_fcode_tab is seeded in MPV_encode_defaults() below.
 * NOTE(review): default_mv_penalty's initialization is not visible in this
 * chunk — presumably filled elsewhere; confirm. */
60 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
61 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
63 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute per-qscale quantizer multiplier tables from a quant matrix.
 *
 * Fills qmat (32-bit reciprocal factors) and, in the generic branch, qmat16
 * (16-bit factor + rounding-bias pairs) for every qscale in [qmin, qmax].
 * Which scaling is applied depends on the fdct implementation selected in
 * dsp, since the ifast fdct bakes the AAN scale factors into its output.
 *
 * NOTE(review): the embedded original line numbers are non-contiguous — parts
 * of this function (closing braces, the tail of the ifast divisor expression,
 * the overflow-shift handling) are elided in this dump.
 */
68 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
69 uint16_t (*qmat16)[2][64],
70 const uint16_t *quant_matrix,
71 int bias, int qmin, int qmax, int intra)
76 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Branch 1: "accurate" fdcts (jpeg islow 8/10-bit, faan) — plain reciprocal. */
78 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
79 dsp->fdct == ff_jpeg_fdct_islow_10 ||
80 dsp->fdct == ff_faandct) {
81 for (i = 0; i < 64; i++) {
82 const int j = dsp->idct_permutation[i];
83 /* 16 <= qscale * quant_matrix[i] <= 7905
84 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
85 * 19952 <= x <= 249205026
86 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
87 * 3444240 >= (1 << 36) / (x) >= 275 */
/* NOTE(review): the derivation above mentions ff_aanscales, but this branch
 * divides only by qscale * quant_matrix[j]; the comment looks copied from the
 * ifast branch below — confirm against upstream. */
89 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90 (qscale * quant_matrix[j]));
/* Branch 2: ifast fdct — divisor additionally carries the AAN scale factors
 * (hence the extra +14 bits of headroom in the shift). */
92 } else if (dsp->fdct == ff_fdct_ifast) {
93 for (i = 0; i < 64; i++) {
94 const int j = dsp->idct_permutation[i];
95 /* 16 <= qscale * quant_matrix[i] <= 7905
96 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
97 * 19952 <= x <= 249205026
98 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
99 * 3444240 >= (1 << 36) / (x) >= 275 */
101 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
/* NOTE(review): divisor expression is cut mid-line here — remainder of this
 * statement (and the else header of branch 3) is elided in this dump. */
102 (ff_aanscales[i] * qscale *
/* Branch 3 (generic fdct): also build the 16-bit factor/bias pairs in qmat16. */
106 for (i = 0; i < 64; i++) {
107 const int j = dsp->idct_permutation[i];
108 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109 * Assume x = qscale * quant_matrix[i]
111 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112 * so 32768 >= (1 << 19) / (x) >= 67 */
113 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114 (qscale * quant_matrix[j]));
115 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116 // (qscale * quant_matrix[i]);
117 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118 (qscale * quant_matrix[j]);
/* Clamp the 16-bit factor away from 0 and from 0x8000 (would overflow the
 * signed 16-bit multiply in the MMX quantizer). */
120 if (qmat16[qscale][0][i] == 0 ||
121 qmat16[qscale][0][i] == 128 * 256)
122 qmat16[qscale][0][i] = 128 * 256 - 1;
123 qmat16[qscale][1][i] =
124 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125 qmat16[qscale][0][i]);
/* Overflow guard: starting at 'intra' (skips DC for intra matrices), shrink
 * the effective shift while max * qmat would exceed INT_MAX. */
129 for (i = intra; i < 64; i++) {
131 if (dsp->fdct == ff_fdct_ifast) {
132 max = (8191LL * ff_aanscales[i]) >> 14;
134 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
140 av_log(NULL, AV_LOG_INFO,
141 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the macroblock qscale (and lambda2) from the current lambda:
 * qscale ~= lambda * 139 / (2^(FF_LAMBDA_SHIFT+7)), clipped to the user's
 * [qmin, qmax] range.  NOTE(review): closing brace and the tail of the
 * lambda2 expression are elided in this dump. */
146 static inline void update_qscale(MpegEncContext *s)
148 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149 (FF_LAMBDA_SHIFT + 7);
150 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
152 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write all 64 quant-matrix coefficients to the bitstream, 8 bits each,
 * in zigzag scan order (as required by the MPEG bitstream syntax). */
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
162 for (i = 0; i < 64; i++) {
163 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
/* init s->current_picture.qscale_table from s->lambda_table:
 * converts each macroblock's lambda to a qscale with the same
 * lambda*139 >> (FF_LAMBDA_SHIFT+7) mapping used by update_qscale(),
 * clipped to [qmin, qmax] (tail of the av_clip call elided in this dump). */
172 void ff_init_qscale_tab(MpegEncContext *s)
174 int8_t * const qscale_table = s->current_picture.qscale_table;
177 for (i = 0; i < s->mb_num; i++) {
178 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the per-frame fields listed below from src into dst (used, per the
 * name, to refresh duplicated slice-thread contexts after motion estimation).
 * NOTE(review): additional COPY() lines and the #undef appear elided here. */
185 static void update_duplicate_context_after_me(MpegEncContext *dst,
188 #define COPY(a) dst->a= src->a
190 COPY(current_picture);
196 COPY(picture_in_gop_number);
197 COPY(gop_picture_number);
198 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199 COPY(progressive_frame); // FIXME don't set in encode_header
200 COPY(partitioned_frame); // FIXME don't set in encode_header
205 * Set the given MpegEncContext to defaults for encoding.
206 * the changed fields will not depend upon the prior state of the MpegEncContext.
208 static void MPV_encode_defaults(MpegEncContext *s)
211 ff_MPV_common_defaults(s);
/* Mark motion vectors in the +-16 range as representable with fcode 1. */
213 for (i = -16; i < 16; i++) {
214 default_fcode_tab[i + MAX_MV] = 1;
/* Point the context at the file-static shared tables. */
216 s->me.mv_penalty = default_mv_penalty;
217 s->fcode_tab = default_fcode_tab;
220 /* init video encoder */
/*
 * Validate user parameters, pick the output format for the selected codec id,
 * and initialize all encoder sub-systems (DSP, rate control, quant matrices).
 * Returns 0 on success, negative on error (return statements are among the
 * lines elided in this dump — original line numbers are non-contiguous).
 */
221 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
223 MpegEncContext *s = avctx->priv_data;
225 int chroma_h_shift, chroma_v_shift;
227 MPV_encode_defaults(s);
/* Per-codec pixel-format validation. */
229 switch (avctx->codec_id) {
230 case AV_CODEC_ID_MPEG2VIDEO:
231 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
232 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
233 av_log(avctx, AV_LOG_ERROR,
234 "only YUV420 and YUV422 are supported\n");
238 case AV_CODEC_ID_LJPEG:
239 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
240 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
241 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
242 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
243 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
244 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
245 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
246 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
247 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
251 case AV_CODEC_ID_MJPEG:
252 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
253 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
254 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
255 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
256 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
257 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
262 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
263 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Record the chroma subsampling mode. */
268 switch (avctx->pix_fmt) {
269 case AV_PIX_FMT_YUVJ422P:
270 case AV_PIX_FMT_YUV422P:
271 s->chroma_format = CHROMA_422;
273 case AV_PIX_FMT_YUVJ420P:
274 case AV_PIX_FMT_YUV420P:
276 s->chroma_format = CHROMA_420;
/* Copy the user-visible AVCodecContext settings into the private context. */
280 s->bit_rate = avctx->bit_rate;
281 s->width = avctx->width;
282 s->height = avctx->height;
283 if (avctx->gop_size > 600 &&
284 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
285 av_log(avctx, AV_LOG_ERROR,
286 "Warning keyframe interval too large! reducing it ...\n");
287 avctx->gop_size = 600;
289 s->gop_size = avctx->gop_size;
291 s->flags = avctx->flags;
292 s->flags2 = avctx->flags2;
293 if (avctx->max_b_frames > MAX_B_FRAMES) {
294 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
295 "is %d.\n", MAX_B_FRAMES);
297 s->max_b_frames = avctx->max_b_frames;
298 s->codec_id = avctx->codec->id;
299 s->strict_std_compliance = avctx->strict_std_compliance;
300 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
301 s->mpeg_quant = avctx->mpeg_quant;
302 s->rtp_mode = !!avctx->rtp_payload_size;
303 s->intra_dc_precision = avctx->intra_dc_precision;
304 s->user_specified_pts = AV_NOPTS_VALUE;
306 if (s->gop_size <= 1) {
313 s->me_method = avctx->me_method;
316 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is needed whenever any masking option is active
 * (trailing condition of this expression is elided in this dump). */
318 s->adaptive_quant = (s->avctx->lumi_masking ||
319 s->avctx->dark_masking ||
320 s->avctx->temporal_cplx_masking ||
321 s->avctx->spatial_cplx_masking ||
322 s->avctx->p_masking ||
323 s->avctx->border_masking ||
324 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
327 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* Rate-control sanity checks (errors presumably return; returns elided). */
329 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
330 av_log(avctx, AV_LOG_ERROR,
331 "a vbv buffer size is needed, "
332 "for encoding with a maximum bitrate\n");
336 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
337 av_log(avctx, AV_LOG_INFO,
338 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
341 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
342 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
346 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
347 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
351 if (avctx->rc_max_rate &&
352 avctx->rc_max_rate == avctx->bit_rate &&
353 avctx->rc_max_rate != avctx->rc_min_rate) {
354 av_log(avctx, AV_LOG_INFO,
355 "impossible bitrate constraints, this will fail\n");
358 if (avctx->rc_buffer_size &&
359 avctx->bit_rate * (int64_t)avctx->time_base.num >
360 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
361 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
365 if (!s->fixed_qscale &&
366 avctx->bit_rate * av_q2d(avctx->time_base) >
367 avctx->bit_rate_tolerance) {
368 av_log(avctx, AV_LOG_ERROR,
369 "bitrate tolerance too small for bitrate\n");
/* MPEG-1/2 CBR: the 16-bit vbv_delay field cannot represent too-large
 * buffers at 90 kHz, hence the 0xFFFF (= VBR) fallback warning. */
373 if (s->avctx->rc_max_rate &&
374 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
375 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
376 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
377 90000LL * (avctx->rc_buffer_size - 1) >
378 s->avctx->rc_max_rate * 0xFFFFLL) {
379 av_log(avctx, AV_LOG_INFO,
380 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
381 "specified vbv buffer is too large for the given bitrate!\n");
/* Feature/codec compatibility checks. */
384 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
385 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
386 s->codec_id != AV_CODEC_ID_FLV1) {
387 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
391 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
392 av_log(avctx, AV_LOG_ERROR,
393 "OBMC is only supported with simple mb decision\n");
397 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
398 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
402 if (s->max_b_frames &&
403 s->codec_id != AV_CODEC_ID_MPEG4 &&
404 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
405 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
406 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
410 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
411 s->codec_id == AV_CODEC_ID_H263 ||
412 s->codec_id == AV_CODEC_ID_H263P) &&
413 (avctx->sample_aspect_ratio.num > 255 ||
414 avctx->sample_aspect_ratio.den > 255)) {
415 av_log(avctx, AV_LOG_ERROR,
416 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
417 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
421 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
422 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
423 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
427 // FIXME mpeg2 uses that too
428 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
429 av_log(avctx, AV_LOG_ERROR,
430 "mpeg2 style quantization not supported by codec\n");
434 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
435 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
439 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
440 s->avctx->mb_decision != FF_MB_DECISION_RD) {
441 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
445 if (s->avctx->scenechange_threshold < 1000000000 &&
446 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
447 av_log(avctx, AV_LOG_ERROR,
448 "closed gop with scene change detection are not supported yet, "
449 "set threshold to 1000000000\n");
453 if (s->flags & CODEC_FLAG_LOW_DELAY) {
454 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
455 av_log(avctx, AV_LOG_ERROR,
456 "low delay forcing is only available for mpeg2\n");
459 if (s->max_b_frames != 0) {
460 av_log(avctx, AV_LOG_ERROR,
461 "b frames cannot be used with low delay\n");
466 if (s->q_scale_type == 1) {
467 if (avctx->qmax > 12) {
468 av_log(avctx, AV_LOG_ERROR,
469 "non linear quant only supports qmax <= 12 currently\n");
474 if (s->avctx->thread_count > 1 &&
475 s->codec_id != AV_CODEC_ID_MPEG4 &&
476 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
477 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
478 (s->codec_id != AV_CODEC_ID_H263P)) {
479 av_log(avctx, AV_LOG_ERROR,
480 "multi threaded encoding not supported by codec\n");
484 if (s->avctx->thread_count < 1) {
485 av_log(avctx, AV_LOG_ERROR,
486 "automatic thread number detection not supported by codec,"
491 if (s->avctx->thread_count > 1)
494 if (!avctx->time_base.den || !avctx->time_base.num) {
495 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
499 i = (INT_MAX / 2 + 128) >> 8;
500 if (avctx->mb_threshold >= i) {
501 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
506 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
507 av_log(avctx, AV_LOG_INFO,
508 "notice: b_frame_strategy only affects the first pass\n");
509 avctx->b_frame_strategy = 0;
/* Reduce the timebase fraction to lowest terms. */
512 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
514 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
515 avctx->time_base.den /= i;
516 avctx->time_base.num /= i;
/* Default quantizer rounding biases: 3/8 intra + truncation inter for the
 * MPEG-style quantizers, else the H.263-style -1/4 inter bias. */
520 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
521 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
522 // (a + x * 3 / 8) / x
523 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
524 s->inter_quant_bias = 0;
526 s->intra_quant_bias = 0;
528 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
/* User-specified biases override the defaults. */
531 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
532 s->intra_quant_bias = avctx->intra_quant_bias;
533 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
534 s->inter_quant_bias = avctx->inter_quant_bias;
536 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4: the bitstream's time_increment field is 16 bits wide. */
539 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
540 s->avctx->time_base.den > (1 << 16) - 1) {
541 av_log(avctx, AV_LOG_ERROR,
542 "timebase %d/%d not supported by MPEG 4 standard, "
543 "the maximum admitted value for the timebase denominator "
544 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
548 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* Per-codec output-format selection and codec-specific flags. */
550 switch (avctx->codec->id) {
551 case AV_CODEC_ID_MPEG1VIDEO:
552 s->out_format = FMT_MPEG1;
553 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
554 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
556 case AV_CODEC_ID_MPEG2VIDEO:
557 s->out_format = FMT_MPEG1;
558 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
559 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
562 case AV_CODEC_ID_LJPEG:
563 case AV_CODEC_ID_MJPEG:
564 s->out_format = FMT_MJPEG;
565 s->intra_only = 1; /* force intra only for jpeg */
566 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
567 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
568 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
569 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
570 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
572 s->mjpeg_vsample[0] = 2;
573 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
574 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
575 s->mjpeg_hsample[0] = 2;
576 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
577 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
579 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
580 ff_mjpeg_encode_init(s) < 0)
585 case AV_CODEC_ID_H261:
586 if (!CONFIG_H261_ENCODER)
588 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
589 av_log(avctx, AV_LOG_ERROR,
590 "The specified picture size of %dx%d is not valid for the "
591 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
592 s->width, s->height);
595 s->out_format = FMT_H261;
599 case AV_CODEC_ID_H263:
600 if (!CONFIG_H263_ENCODER)
602 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
603 s->width, s->height) == 8) {
604 av_log(avctx, AV_LOG_INFO,
605 "The specified picture size of %dx%d is not valid for "
606 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
607 "352x288, 704x576, and 1408x1152."
608 "Try H.263+.\n", s->width, s->height);
611 s->out_format = FMT_H263;
615 case AV_CODEC_ID_H263P:
616 s->out_format = FMT_H263;
619 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
620 s->modified_quant = s->h263_aic;
621 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
622 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
625 /* These are just to be sure */
629 case AV_CODEC_ID_FLV1:
630 s->out_format = FMT_H263;
631 s->h263_flv = 2; /* format = 1; 11-bit codes */
632 s->unrestricted_mv = 1;
633 s->rtp_mode = 0; /* don't allow GOB */
637 case AV_CODEC_ID_RV10:
638 s->out_format = FMT_H263;
642 case AV_CODEC_ID_RV20:
643 s->out_format = FMT_H263;
646 s->modified_quant = 1;
650 s->unrestricted_mv = 0;
652 case AV_CODEC_ID_MPEG4:
653 s->out_format = FMT_H263;
655 s->unrestricted_mv = 1;
656 s->low_delay = s->max_b_frames ? 0 : 1;
657 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
659 case AV_CODEC_ID_MSMPEG4V2:
660 s->out_format = FMT_H263;
662 s->unrestricted_mv = 1;
663 s->msmpeg4_version = 2;
667 case AV_CODEC_ID_MSMPEG4V3:
668 s->out_format = FMT_H263;
670 s->unrestricted_mv = 1;
671 s->msmpeg4_version = 3;
672 s->flipflop_rounding = 1;
676 case AV_CODEC_ID_WMV1:
677 s->out_format = FMT_H263;
679 s->unrestricted_mv = 1;
680 s->msmpeg4_version = 4;
681 s->flipflop_rounding = 1;
685 case AV_CODEC_ID_WMV2:
686 s->out_format = FMT_H263;
688 s->unrestricted_mv = 1;
689 s->msmpeg4_version = 5;
690 s->flipflop_rounding = 1;
698 avctx->has_b_frames = !s->low_delay;
702 s->progressive_frame =
703 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
704 CODEC_FLAG_INTERLACED_ME) ||
/* Initialize the shared mpegvideo state, DSP and per-format encoders. */
708 if (ff_MPV_common_init(s) < 0)
712 ff_MPV_encode_init_x86(s);
714 ff_h263dsp_init(&s->h263dsp);
715 if (!s->dct_quantize)
716 s->dct_quantize = ff_dct_quantize_c;
718 s->denoise_dct = denoise_dct_c;
719 s->fast_dct_quantize = s->dct_quantize;
721 s->dct_quantize = dct_quantize_trellis_c;
723 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
724 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
726 s->quant_precision = 5;
728 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
729 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
731 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
732 ff_h261_encode_init(s);
733 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
734 ff_h263_encode_init(s);
735 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
736 ff_msmpeg4_encode_init(s);
737 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
738 && s->out_format == FMT_MPEG1)
739 ff_mpeg1_encode_init(s);
/* Build intra/inter quant matrices in idct_permutation order, then let any
 * user-supplied matrices override the codec defaults. */
742 for (i = 0; i < 64; i++) {
743 int j = s->dsp.idct_permutation[i];
744 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
746 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
747 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
748 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
750 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
753 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
754 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
756 if (s->avctx->intra_matrix)
757 s->intra_matrix[j] = s->avctx->intra_matrix[i];
758 if (s->avctx->inter_matrix)
759 s->inter_matrix[j] = s->avctx->inter_matrix[i];
762 /* precompute matrix */
763 /* for mjpeg, we do include qscale in the matrix */
764 if (s->out_format != FMT_MJPEG) {
765 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
766 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
768 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
769 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
773 if (ff_rate_control_init(s) < 0)
776 #if FF_API_ERROR_RATE
777 FF_DISABLE_DEPRECATION_WARNINGS
778 if (avctx->error_rate)
779 s->error_rate = avctx->error_rate;
/* NOTE(review): trailing ';' after FF_ENABLE_DEPRECATION_WARNINGS looks like
 * a stray empty statement (the matching disable above has none) — harmless
 * only if the macro tolerates it; confirm against upstream. */
780 FF_ENABLE_DEPRECATION_WARNINGS;
/* Tear down the encoder: rate control, shared mpegvideo state, the mjpeg
 * sub-encoder when active, and the extradata buffer. */
786 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
788 MpegEncContext *s = avctx->priv_data;
790 ff_rate_control_uninit(s);
792 ff_MPV_common_end(s);
793 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
794 s->out_format == FMT_MJPEG)
795 ff_mjpeg_encode_close(s);
797 av_freep(&avctx->extradata);
/* Sum of absolute errors of a 16x16 block against the constant value 'ref'
 * (used below to measure how "flat" a macroblock is around its mean). */
802 static int get_sae(uint8_t *src, int ref, int stride)
807 for (y = 0; y < 16; y++) {
808 for (x = 0; x < 16; x++) {
809 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to intra-code: a block votes "intra"
 * when its flatness (SAE around its own mean, +500 margin) is below the SAD
 * against the reference frame. */
816 static int get_intra_count(MpegEncContext *s, uint8_t *src,
817 uint8_t *ref, int stride)
825 for (y = 0; y < h; y += 16) {
826 for (x = 0; x < w; x += 16) {
827 int offset = x + y * stride;
828 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
830 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
831 int sae = get_sae(src + offset, mean, stride);
833 acc += sae + 500 < sad;
/*
 * Queue a user frame into s->input_picture[] for reordered encoding:
 * validates/guesses its pts, then either references the caller's buffers
 * ("direct") or allocates an internal Picture and copies the planes.
 * NOTE(review): original line numbers are non-contiguous — the direct/copy
 * branch structure and several error paths are elided in this dump.
 */
840 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
844 int i, display_picture_number = 0, ret;
/* Frames buffered before output: max_b_frames, else 1 (0 in low delay). */
845 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
846 (s->low_delay ? 0 : 1);
851 display_picture_number = s->input_picture_number++;
/* pts handling: reject non-monotonic timestamps; when the caller gives no
 * pts, continue from the last user pts or fall back to the picture number. */
853 if (pts != AV_NOPTS_VALUE) {
854 if (s->user_specified_pts != AV_NOPTS_VALUE) {
856 int64_t last = s->user_specified_pts;
859 av_log(s->avctx, AV_LOG_ERROR,
860 "Error, Invalid timestamp=%"PRId64", "
861 "last=%"PRId64"\n", pts, s->user_specified_pts);
865 if (!s->low_delay && display_picture_number == 1)
866 s->dts_delta = time - last;
868 s->user_specified_pts = pts;
870 if (s->user_specified_pts != AV_NOPTS_VALUE) {
871 s->user_specified_pts =
872 pts = s->user_specified_pts + 1;
873 av_log(s->avctx, AV_LOG_INFO,
874 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
877 pts = display_picture_number;
/* NOTE(review): the trailing ';' makes this 'if' a no-op empty statement —
 * it almost certainly was meant to guard a 'direct = 0' like the linesize
 * checks below; confirm against upstream (likely a corruption in this dump). */
883 if (!pic_arg->buf[0]);
885 if (pic_arg->linesize[0] != s->linesize)
887 if (pic_arg->linesize[1] != s->uvlinesize)
889 if (pic_arg->linesize[2] != s->uvlinesize)
892 av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
893 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* Direct path: reference the caller's frame without copying. */
896 i = ff_find_unused_picture(s, 1);
900 pic = &s->picture[i];
903 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
905 if (ff_alloc_picture(s, pic, 1) < 0) {
/* Copy path: allocate an internal picture and copy plane by plane. */
909 i = ff_find_unused_picture(s, 0);
913 pic = &s->picture[i];
916 if (ff_alloc_picture(s, pic, 0) < 0) {
920 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
921 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
922 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
925 int h_chroma_shift, v_chroma_shift;
926 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
930 for (i = 0; i < 3; i++) {
931 int src_stride = pic_arg->linesize[i];
932 int dst_stride = i ? s->uvlinesize : s->linesize;
933 int h_shift = i ? h_chroma_shift : 0;
934 int v_shift = i ? v_chroma_shift : 0;
935 int w = s->width >> h_shift;
936 int h = s->height >> v_shift;
937 uint8_t *src = pic_arg->data[i];
938 uint8_t *dst = pic->f.data[i];
940 if (!s->avctx->rc_buffer_size)
941 dst += INPLACE_OFFSET;
943 if (src_stride == dst_stride)
944 memcpy(dst, src, src_stride * h);
955 ret = av_frame_copy_props(&pic->f, pic_arg);
959 pic->f.display_picture_number = display_picture_number;
960 pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
963 /* shift buffer entries */
964 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
965 s->input_picture[i - 1] = s->input_picture[i];
967 s->input_picture[encoding_delay] = (Picture*) pic;
/*
 * Decide whether picture p is similar enough to ref to be skipped: compare
 * every 8x8 block of all three planes with frame_skip_cmp and aggregate per
 * the frame_skip_exp mode (0=max, 1=sum abs, 2=sum sq, 3/4=64-bit powers).
 * NOTE(review): the final checks read only score64, but exp modes 0-2 write
 * 'score' — the lines folding score into score64 (and the return statements)
 * appear elided from this dump; confirm against upstream.
 */
972 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
978 for (plane = 0; plane < 3; plane++) {
979 const int stride = p->f.linesize[plane];
/* Luma has 2x2 8x8 blocks per MB, chroma 1x1 (4:2:0 layout). */
980 const int bw = plane ? 1 : 2;
981 for (y = 0; y < s->mb_height * bw; y++) {
982 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared input pictures carry the INPLACE_OFFSET (16) shift. */
983 int off = p->shared ? 0 : 16;
984 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
985 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
986 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
988 switch (s->avctx->frame_skip_exp) {
989 case 0: score = FFMAX(score, v); break;
990 case 1: score += FFABS(v); break;
991 case 2: score += v * v; break;
992 case 3: score64 += FFABS(v * v * (int64_t)v); break;
993 case 4: score64 += v * v * (int64_t)(v * v); break;
/* Skip when below the absolute threshold or the lambda-scaled factor. */
1002 if (score64 < s->avctx->frame_skip_threshold)
1004 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with a throwaway codec context (used by the B-frame
 * strategy estimator) and free the packet; the elided tail presumably
 * returns the packet size / error code. */
1009 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1011 AVPacket pkt = { 0 };
1012 int ret, got_output;
1014 av_init_packet(&pkt);
1015 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1020 av_free_packet(&pkt);
/*
 * b_frame_strategy == 2: brute-force search for the best number of B-frames.
 * Encodes the queued input pictures (downscaled by brd_scale) with a
 * temporary encoder for each candidate B-frame count j and picks the count
 * with the lowest rate-distortion cost.  Returns the best count.
 * NOTE(review): original line numbers are non-contiguous — the rd bookkeeping
 * between candidates, best_rd update and avcodec_close path are elided.
 */
1024 static int estimate_best_b_count(MpegEncContext *s)
1026 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1027 AVCodecContext *c = avcodec_alloc_context3(NULL);
1028 AVFrame input[MAX_B_FRAMES + 2];
1029 const int scale = s->avctx->brd_scale;
1030 int i, j, out_size, p_lambda, b_lambda, lambda2;
1031 int64_t best_rd = INT64_MAX;
1032 int best_b_count = -1;
1034 assert(scale >= 0 && scale <= 3);
1037 //s->next_picture_ptr->quality;
1038 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1039 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1040 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1041 if (!b_lambda) // FIXME we should do this somewhere else
1042 b_lambda = p_lambda;
1043 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the relevant user settings. */
1046 c->width = s->width >> scale;
1047 c->height = s->height >> scale;
1048 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1049 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1050 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1051 c->mb_decision = s->avctx->mb_decision;
1052 c->me_cmp = s->avctx->me_cmp;
1053 c->mb_cmp = s->avctx->mb_cmp;
1054 c->me_sub_cmp = s->avctx->me_sub_cmp;
1055 c->pix_fmt = AV_PIX_FMT_YUV420P;
1056 c->time_base = s->avctx->time_base;
1057 c->max_b_frames = s->max_b_frames;
1059 if (avcodec_open2(c, codec, NULL) < 0)
/* Build downscaled copies of the reference + queued input pictures. */
1062 for (i = 0; i < s->max_b_frames + 2; i++) {
1063 int ysize = c->width * c->height;
1064 int csize = (c->width / 2) * (c->height / 2);
1065 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1066 s->next_picture_ptr;
1068 avcodec_get_frame_defaults(&input[i]);
/* One contiguous allocation holds Y + U + V planes.
 * NOTE(review): no visible malloc-failure check — presumably elided or
 * missing; confirm. */
1069 input[i].data[0] = av_malloc(ysize + 2 * csize);
1070 input[i].data[1] = input[i].data[0] + ysize;
1071 input[i].data[2] = input[i].data[1] + csize;
1072 input[i].linesize[0] = c->width;
1073 input[i].linesize[1] =
1074 input[i].linesize[2] = c->width / 2;
1076 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1077 pre_input = *pre_input_ptr;
1079 if (!pre_input.shared && i) {
1080 pre_input.f.data[0] += INPLACE_OFFSET;
1081 pre_input.f.data[1] += INPLACE_OFFSET;
1082 pre_input.f.data[2] += INPLACE_OFFSET;
1085 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1086 pre_input.f.data[0], pre_input.f.linesize[0],
1087 c->width, c->height);
1088 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1089 pre_input.f.data[1], pre_input.f.linesize[1],
1090 c->width >> 1, c->height >> 1);
1091 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1092 pre_input.f.data[2], pre_input.f.linesize[2],
1093 c->width >> 1, c->height >> 1);
/* Try every candidate B-frame count j. */
1097 for (j = 0; j < s->max_b_frames + 1; j++) {
1100 if (!s->input_picture[j])
1103 c->error[0] = c->error[1] = c->error[2] = 0;
/* Frame 0 is always encoded as a cheap I-frame anchor. */
1105 input[0].pict_type = AV_PICTURE_TYPE_I;
1106 input[0].quality = 1 * FF_QP2LAMBDA;
1108 out_size = encode_frame(c, &input[0]);
1110 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* Every (j+1)-th frame (and the last) is a P, the rest are Bs. */
1112 for (i = 0; i < s->max_b_frames + 1; i++) {
1113 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1115 input[i + 1].pict_type = is_p ?
1116 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1117 input[i + 1].quality = is_p ? p_lambda : b_lambda;
1119 out_size = encode_frame(c, &input[i + 1]);
1121 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1124 /* get the delayed frames */
1126 out_size = encode_frame(c, NULL);
1127 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Distortion term from the scratch encoder's PSNR error accumulators. */
1130 rd += c->error[0] + c->error[1] + c->error[2];
1141 for (i = 0; i < s->max_b_frames + 2; i++) {
1142 av_freep(&input[i].data[0]);
1145 return best_b_count;
/*
 * Pick the next picture to encode: shifts the reorder buffer, decides the
 * coded type (I for GOP starts, P/B otherwise) according to the configured
 * b_frame_strategy, handles frame skipping and closed GOPs, and installs the
 * chosen picture as new_picture / current_picture.
 * NOTE(review): original line numbers are non-contiguous — several branches
 * (goto targets, the no-output path, return statements) are elided.
 */
1148 static int select_input_picture(MpegEncContext *s)
1152 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1153 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1154 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1156 /* set next picture type & ordering */
1157 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
/* Force an intra frame when there is no reference yet or intra-only mode. */
1158 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1159 s->next_picture_ptr == NULL || s->intra_only) {
1160 s->reordered_input_picture[0] = s->input_picture[0];
1161 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1162 s->reordered_input_picture[0]->f.coded_picture_number =
1163 s->coded_picture_number++;
/* Frame skipping: drop the input if it is close enough to the reference. */
1167 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1168 if (s->picture_in_gop_number < s->gop_size &&
1169 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1170 // FIXME check that te gop check above is +-1 correct
1171 av_frame_unref(&s->input_picture[0]->f);
1174 ff_vbv_update(s, 0);
/* Two-pass mode: take the picture types recorded by the first pass. */
1180 if (s->flags & CODEC_FLAG_PASS2) {
1181 for (i = 0; i < s->max_b_frames + 1; i++) {
1182 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1184 if (pict_num >= s->rc_context.num_entries)
1186 if (!s->input_picture[i]) {
1187 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1191 s->input_picture[i]->f.pict_type =
1192 s->rc_context.entry[pict_num].new_pict_type;
/* B-frame count selection per strategy: 0 = always max, 1 = intra-count
 * heuristic, 2 = brute-force search. */
1196 if (s->avctx->b_frame_strategy == 0) {
1197 b_frames = s->max_b_frames;
1198 while (b_frames && !s->input_picture[b_frames])
1200 } else if (s->avctx->b_frame_strategy == 1) {
1201 for (i = 1; i < s->max_b_frames + 1; i++) {
1202 if (s->input_picture[i] &&
1203 s->input_picture[i]->b_frame_score == 0) {
1204 s->input_picture[i]->b_frame_score =
1206 s->input_picture[i ]->f.data[0],
1207 s->input_picture[i - 1]->f.data[0],
1211 for (i = 0; i < s->max_b_frames + 1; i++) {
1212 if (s->input_picture[i] == NULL ||
1213 s->input_picture[i]->b_frame_score - 1 >
1214 s->mb_num / s->avctx->b_sensitivity)
1218 b_frames = FFMAX(0, i - 1);
/* Reset the cached scores for the pictures we are about to consume. */
1221 for (i = 0; i < b_frames + 1; i++) {
1222 s->input_picture[i]->b_frame_score = 0;
1224 } else if (s->avctx->b_frame_strategy == 2) {
1225 b_frames = estimate_best_b_count(s);
1227 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* Respect user-forced picture types and GOP boundaries. */
1233 for (i = b_frames - 1; i >= 0; i--) {
1234 int type = s->input_picture[i]->f.pict_type;
1235 if (type && type != AV_PICTURE_TYPE_B)
1238 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1239 b_frames == s->max_b_frames) {
1240 av_log(s->avctx, AV_LOG_ERROR,
1241 "warning, too many b frames in a row\n");
1244 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1245 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1246 s->gop_size > s->picture_in_gop_number) {
1247 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1249 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1251 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1255 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1256 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* Reorder: the anchor (P/I) is emitted first, its B-frames after it. */
1259 s->reordered_input_picture[0] = s->input_picture[b_frames];
1260 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1261 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1262 s->reordered_input_picture[0]->f.coded_picture_number =
1263 s->coded_picture_number++;
1264 for (i = 0; i < b_frames; i++) {
1265 s->reordered_input_picture[i + 1] = s->input_picture[i];
1266 s->reordered_input_picture[i + 1]->f.pict_type =
1268 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1269 s->coded_picture_number++;
/* Install the selected picture as new_picture / current_picture. */
1274 if (s->reordered_input_picture[0]) {
/* B-frames are not used as references (reference = 0, else 3). */
1275 s->reordered_input_picture[0]->reference =
1276 s->reordered_input_picture[0]->f.pict_type !=
1277 AV_PICTURE_TYPE_B ? 3 : 0;
1279 ff_mpeg_unref_picture(s, &s->new_picture);
1280 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1283 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1284 // input is a shared pix, so we can't modifiy it -> alloc a new
1285 // one & ensure that the shared one is reuseable
1288 int i = ff_find_unused_picture(s, 0);
1291 pic = &s->picture[i];
1293 pic->reference = s->reordered_input_picture[0]->reference;
1294 if (ff_alloc_picture(s, pic, 0) < 0) {
1298 ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1302 /* mark us unused / free shared pic */
1303 av_frame_unref(&s->reordered_input_picture[0]->f);
1304 s->reordered_input_picture[0]->shared = 0;
1306 s->current_picture_ptr = pic;
1308 // input is not a shared pix -> reuse buffer for current_pix
1309 s->current_picture_ptr = s->reordered_input_picture[0];
1310 for (i = 0; i < 4; i++) {
1311 s->new_picture.f.data[i] += INPLACE_OFFSET;
1314 ff_mpeg_unref_picture(s, &s->current_picture);
1315 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1316 s->current_picture_ptr)) < 0)
1319 s->picture_number = s->new_picture.f.display_picture_number;
1321 ff_mpeg_unref_picture(s, &s->new_picture);
/* Top-level per-frame encode entry point.
 * Queues the caller's frame (load_input_picture), runs B-frame lookahead /
 * reordering (select_input_picture), encodes one reordered picture into pkt,
 * then updates rate-control / VBV state, per-frame statistics and pkt pts/dts.
 * Returns via *got_packet whether a packet was produced.
 * NOTE(review): this chunk appears decimated — braces, returns and
 * else-branches are missing between the numbered lines; comments below only
 * describe what the visible lines establish. */
1326 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1327 const AVFrame *pic_arg, int *got_packet)
1329 MpegEncContext *s = avctx->priv_data;
1330 int i, stuffing_count, ret;
1331 int context_count = s->slice_context_count;
1333 s->picture_in_gop_number++;
/* Queue the input frame, then pick/reorder the picture to encode now. */
1335 if (load_input_picture(s, pic_arg) < 0)
1338 if (select_input_picture(s) < 0) {
/* Only encode when a picture was selected (new_picture has data). */
1343 if (s->new_picture.f.data[0]) {
/* Worst-case packet size: MAX_MB_BYTES per macroblock. */
1345 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* H.263 MB info side data: 12 bytes per macroblock entry (see write_mb_info). */
1348 s->mb_info_ptr = av_packet_new_side_data(pkt,
1349 AV_PKT_DATA_H263_MB_INFO,
1350 s->mb_width*s->mb_height*12);
1351 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a proportional slice of the output buffer,
 * split by its macroblock-row range. */
1354 for (i = 0; i < context_count; i++) {
1355 int start_y = s->thread_context[i]->start_mb_y;
1356 int end_y = s->thread_context[i]-> end_mb_y;
1357 int h = s->mb_height;
1358 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1359 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1361 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1364 s->pict_type = s->new_picture.f.pict_type;
1366 ff_MPV_frame_start(s, avctx);
1368 if (encode_picture(s, s->picture_number) < 0)
/* Export per-frame bit accounting to the codec context. */
1371 avctx->header_bits = s->header_bits;
1372 avctx->mv_bits = s->mv_bits;
1373 avctx->misc_bits = s->misc_bits;
1374 avctx->i_tex_bits = s->i_tex_bits;
1375 avctx->p_tex_bits = s->p_tex_bits;
1376 avctx->i_count = s->i_count;
1377 // FIXME f/b_count in avctx
1378 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1379 avctx->skip_count = s->skip_count;
1381 ff_MPV_frame_end(s);
1383 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1384 ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow handling: if the frame is too large for the buffer and we
 * can still raise lambda, bump lambda (and the per-MB lambda table when
 * adaptive quant is on), undo per-frame state changes, reset the bit
 * writers — presumably followed by a re-encode retry in elided lines. */
1386 if (avctx->rc_buffer_size) {
1387 RateControlContext *rcc = &s->rc_context;
1388 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1390 if (put_bits_count(&s->pb) > max_size &&
1391 s->lambda < s->avctx->lmax) {
1392 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1393 (s->qscale + 1) / s->qscale);
1394 if (s->adaptive_quant) {
1396 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1397 s->lambda_table[i] =
1398 FFMAX(s->lambda_table[i] + 1,
1399 s->lambda_table[i] * (s->qscale + 1) /
1402 s->mb_skipped = 0; // done in MPV_frame_start()
1403 // done in encode_picture() so we must undo it
1404 if (s->pict_type == AV_PICTURE_TYPE_P) {
1405 if (s->flipflop_rounding ||
1406 s->codec_id == AV_CODEC_ID_H263P ||
1407 s->codec_id == AV_CODEC_ID_MPEG4)
1408 s->no_rounding ^= 1;
1410 if (s->pict_type != AV_PICTURE_TYPE_B) {
1411 s->time_base = s->last_time_base;
1412 s->last_non_b_time = s->time - s->pp_time;
1414 for (i = 0; i < context_count; i++) {
1415 PutBitContext *pb = &s->thread_context[i]->pb;
1416 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1421 assert(s->avctx->rc_max_rate);
1424 if (s->flags & CODEC_FLAG_PASS1)
1425 ff_write_pass1_stats(s);
/* Accumulate per-plane error (PSNR stats) into the codec context. */
1427 for (i = 0; i < 4; i++) {
1428 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1429 avctx->error[i] += s->current_picture_ptr->f.error[i];
1432 if (s->flags & CODEC_FLAG_PASS1)
1433 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1434 avctx->i_tex_bits + avctx->p_tex_bits ==
1435 put_bits_count(&s->pb));
1436 flush_put_bits(&s->pb);
1437 s->frame_bits = put_bits_count(&s->pb);
/* VBV stuffing: pad the frame with codec-specific filler bytes so the
 * virtual buffer does not underflow. */
1439 stuffing_count = ff_vbv_update(s, s->frame_bits);
1440 if (stuffing_count) {
1441 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1442 stuffing_count + 50) {
1443 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1447 switch (s->codec_id) {
1448 case AV_CODEC_ID_MPEG1VIDEO:
1449 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero bytes are legal stuffing. */
1450 while (stuffing_count--) {
1451 put_bits(&s->pb, 8, 0);
1454 case AV_CODEC_ID_MPEG4:
/* MPEG-4: emit a stuffing start code (0x000001C3), then 0xFF bytes;
 * the 4-byte start code itself counts against the stuffing budget. */
1455 put_bits(&s->pb, 16, 0);
1456 put_bits(&s->pb, 16, 0x1C3);
1457 stuffing_count -= 4;
1458 while (stuffing_count--) {
1459 put_bits(&s->pb, 8, 0xFF);
1463 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1465 flush_put_bits(&s->pb);
1466 s->frame_bits = put_bits_count(&s->pb);
1469 /* update mpeg1/2 vbv_delay for CBR */
/* Only for MPEG-1 output in strict CBR (min_rate == max_rate) and when
 * the buffer size fits the 16-bit vbv_delay field (90 kHz units). */
1470 if (s->avctx->rc_max_rate &&
1471 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1472 s->out_format == FMT_MPEG1 &&
1473 90000LL * (avctx->rc_buffer_size - 1) <=
1474 s->avctx->rc_max_rate * 0xFFFFLL) {
1475 int vbv_delay, min_delay;
1476 double inbits = s->avctx->rc_max_rate *
1477 av_q2d(s->avctx->time_base);
1478 int minbits = s->frame_bits - 8 *
1479 (s->vbv_delay_ptr - s->pb.buf - 1);
1480 double bits = s->rc_context.buffer_index + minbits - inbits;
1483 av_log(s->avctx, AV_LOG_ERROR,
1484 "Internal error, negative bits\n");
1486 assert(s->repeat_first_field == 0);
1488 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1489 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1490 s->avctx->rc_max_rate;
1492 vbv_delay = FFMAX(vbv_delay, min_delay);
1494 assert(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in-place in the already written
 * picture header (it straddles byte boundaries: 3+8+5 bits). */
1496 s->vbv_delay_ptr[0] &= 0xF8;
1497 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1498 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1499 s->vbv_delay_ptr[2] &= 0x07;
1500 s->vbv_delay_ptr[2] |= vbv_delay << 3;
/* AVCodecContext.vbv_delay is in 27 MHz units: 90 kHz * 300. */
1501 avctx->vbv_delay = vbv_delay * 300;
1503 s->total_bits += s->frame_bits;
1504 avctx->frame_bits = s->frame_bits;
/* Timestamps: with B-frames (low_delay off) dts lags pts by one frame;
 * the very first coded picture fakes dts via dts_delta. */
1506 pkt->pts = s->current_picture.f.pts;
1507 if (!s->low_delay) {
1508 if (!s->current_picture.f.coded_picture_number)
1509 pkt->dts = pkt->pts - s->dts_delta;
1511 pkt->dts = s->reordered_pts;
1512 s->reordered_pts = s->input_picture[0]->f.pts;
1514 pkt->dts = pkt->pts;
1515 if (s->current_picture.f.key_frame)
1516 pkt->flags |= AV_PKT_FLAG_KEY;
/* Trim the MB-info side data to the amount actually written. */
1518 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1522 assert((s->frame_bits & 7) == 0);
1524 pkt->size = s->frame_bits / 8;
1525 *got_packet = !!pkt->size;
/* Zero out block n entirely when its residual energy "score" (weighted by
 * tab[] over the scan position) is below threshold — cheap single/low
 * coefficient elimination. A negative threshold additionally allows the DC
 * coefficient to be skipped (skip_dc). On elimination, block_last_index[n]
 * becomes 0 (DC kept) or -1 (nothing kept).
 * NOTE(review): source appears decimated — score accumulation and some
 * early-return lines are missing between the numbered lines. */
1529 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1530 int n, int threshold)
/* tab[i]: weight of a nonzero level-1 coefficient at scan position i;
 * early (low-frequency) positions cost more, positions >= 24 are free. */
1532 static const char tab[64] = {
1533 3, 2, 2, 1, 1, 1, 1, 1,
1534 1, 1, 1, 1, 1, 1, 1, 1,
1535 1, 1, 1, 1, 1, 1, 1, 1,
1536 0, 0, 0, 0, 0, 0, 0, 0,
1537 0, 0, 0, 0, 0, 0, 0, 0,
1538 0, 0, 0, 0, 0, 0, 0, 0,
1539 0, 0, 0, 0, 0, 0, 0, 0,
1540 0, 0, 0, 0, 0, 0, 0, 0
1545 int16_t *block = s->block[n];
1546 const int last_index = s->block_last_index[n];
/* Negative threshold: DC may be eliminated too. */
1549 if (threshold < 0) {
1551 threshold = -threshold;
1555 /* Are all we could set to zero already zero? */
1556 if (last_index <= skip_dc - 1)
/* Score pass over the coded coefficients (zigzag/permuted order). */
1559 for (i = 0; i <= last_index; i++) {
1560 const int j = s->intra_scantable.permutated[i];
1561 const int level = FFABS(block[j]);
1563 if (skip_dc && i == 0)
/* Any coefficient with |level| > 1 aborts elimination (branch body elided). */
1567 } else if (level > 1) {
1573 if (score >= threshold)
/* Elimination: clear all coefficients from skip_dc onward. */
1575 for (i = skip_dc; i <= last_index; i++) {
1576 const int j = s->intra_scantable.permutated[i];
1580 s->block_last_index[n] = 0;
1582 s->block_last_index[n] = -1;
/* Clamp quantized coefficients of one block into [min_qcoeff, max_qcoeff]
 * and warn (once per block, in SIMPLE mb_decision mode) how many overflowed.
 * Intra DC (index 0) is never clipped.
 * NOTE(review): decimated — the actual clamp assignments and the overflow
 * counter increments are in elided lines. */
1585 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1589 const int maxlevel = s->max_qcoeff;
1590 const int minlevel = s->min_qcoeff;
1594 i = 1; // skip clipping of intra dc
/* Walk coded coefficients in scan order and clamp out-of-range levels. */
1598 for (; i <= last_index; i++) {
1599 const int j = s->intra_scantable.permutated[i];
1600 int level = block[j];
1602 if (level > maxlevel) {
1605 } else if (level < minlevel) {
/* Only report in SIMPLE mode; RD modes handle clipping cost themselves. */
1613 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1614 av_log(s->avctx, AV_LOG_INFO,
1615 "warning, clipping %d dct coefficients to %d..%d\n",
1616 overflow, minlevel, maxlevel);
/* Compute a 8x8 perceptual weight table for noise shaping: for each pixel,
 * weight = 36*sqrt(count*sqr - sum*sum)/count over its 3x3 (edge-clipped)
 * neighborhood, i.e. proportional to local standard deviation.
 * NOTE(review): decimated — declarations/initialization of the per-pixel
 * accumulators (sum, sqr, count) are in elided lines. */
1619 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1623 for (y = 0; y < 8; y++) {
1624 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood clipped to the 8x8 block bounds. */
1630 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1631 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1632 int v = ptr[x2 + y2 * stride];
/* count*sqr - sum*sum == count^2 * variance (integer form). */
1638 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/* Encode one macroblock: per-MB quantizer update (adaptive quant / QP-RD),
 * pixel fetch (intra) or motion compensation + residual (inter), optional
 * interlaced-DCT decision, skip-DCT pre-quantization heuristics, DCT +
 * quantization (with optional trellis-style noise-shaping refinement),
 * coefficient elimination, and finally the codec-specific entropy coding.
 * mb_block_height/mb_block_count distinguish 4:2:0 (8, 6) from 4:2:2 (16, 8)
 * — see encode_mb().
 * NOTE(review): source appears decimated — many braces, else-branches and
 * some statements are missing between the numbered lines; comments describe
 * only what the visible lines establish. */
1643 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1644 int motion_x, int motion_y,
1645 int mb_block_height,
1648 int16_t weight[8][64];
1649 int16_t orig[8][64];
1650 const int mb_x = s->mb_x;
1651 const int mb_y = s->mb_y;
1654 int dct_offset = s->linesize * 8; // default for progressive frames
1655 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1656 ptrdiff_t wrap_y, wrap_c;
1658 for (i = 0; i < mb_block_count; i++)
1659 skip_dct[i] = s->skipdct;
/* Per-MB quantizer: pick lambda/qscale from the rate-control tables and
 * clamp the delta (dquant) to what the bitstream syntax allows. */
1661 if (s->adaptive_quant) {
1662 const int last_qp = s->qscale;
1663 const int mb_xy = mb_x + mb_y * s->mb_stride;
1665 s->lambda = s->lambda_table[mb_xy];
1668 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1669 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1670 s->dquant = s->qscale - last_qp;
1672 if (s->out_format == FMT_H263) {
/* H.263 syntax only allows dquant in [-2, 2]. */
1673 s->dquant = av_clip(s->dquant, -2, 2);
1675 if (s->codec_id == AV_CODEC_ID_MPEG4) {
/* MPEG-4 restrictions: B-frames with direct MVs or odd dquant, and
 * 8x8 MVs, cannot carry a quantizer change (handling elided). */
1677 if (s->pict_type == AV_PICTURE_TYPE_B) {
1678 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1681 if (s->mv_type == MV_TYPE_8X8)
1687 ff_set_qscale(s, last_qp + s->dquant);
1688 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1689 ff_set_qscale(s, s->qscale + s->dquant);
/* Source pointers for this MB in the frame to be encoded. */
1691 wrap_y = s->linesize;
1692 wrap_c = s->uvlinesize;
1693 ptr_y = s->new_picture.f.data[0] +
1694 (mb_y * 16 * wrap_y) + mb_x * 16;
1695 ptr_cb = s->new_picture.f.data[1] +
1696 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1697 ptr_cr = s->new_picture.f.data[2] +
1698 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* Edge MBs: replicate pixels into edge_emu_buffer so the 16x16/8x8 reads
 * below never leave the picture. */
1700 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1701 uint8_t *ebuf = s->edge_emu_buffer + 32;
1702 s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1703 mb_y * 16, s->width, s->height);
1705 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1706 mb_block_height, mb_x * 8, mb_y * 8,
1707 s->width >> 1, s->height >> 1);
1708 ptr_cb = ebuf + 18 * wrap_y;
1709 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1710 mb_block_height, mb_x * 8, mb_y * 8,
1711 s->width >> 1, s->height >> 1);
1712 ptr_cr = ebuf + 18 * wrap_y + 8;
/* Intra path: frame/field DCT decision on the source pixels.
 * The -400 bias favours progressive; ildct_cmp[4] compares same-parity
 * vs mixed-parity line pairs. */
1716 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1717 int progressive_score, interlaced_score;
1719 s->interlaced_dct = 0;
1720 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1722 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1723 NULL, wrap_y, 8) - 400;
1725 if (progressive_score > 0) {
1726 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1727 NULL, wrap_y * 2, 8) +
1728 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1729 NULL, wrap_y * 2, 8);
1730 if (progressive_score > interlaced_score) {
1731 s->interlaced_dct = 1;
/* Field DCT: bottom blocks start one line down, not 8 lines down. */
1733 dct_offset = wrap_y;
1735 if (s->chroma_format == CHROMA_422)
/* Intra: load the four 8x8 luma blocks ... */
1741 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1742 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1743 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1744 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
/* ... and the chroma blocks (skipped in gray mode; elided branch). */
1746 if (s->flags & CODEC_FLAG_GRAY) {
1750 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1751 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1752 if (!s->chroma_y_shift) { /* 422 */
1753 s->dsp.get_pixels(s->block[6],
1754 ptr_cb + (dct_offset >> 1), wrap_c);
1755 s->dsp.get_pixels(s->block[7],
1756 ptr_cr + (dct_offset >> 1), wrap_c);
/* Inter path: motion compensate into s->dest, then form residuals. */
1760 op_pixels_func (*op_pix)[4];
1761 qpel_mc_func (*op_qpix)[16];
1762 uint8_t *dest_y, *dest_cb, *dest_cr;
1764 dest_y = s->dest[0];
1765 dest_cb = s->dest[1];
1766 dest_cr = s->dest[2];
/* Rounding mode of the MC filters alternates per frame (no_rounding);
 * B-frames always use the rounded variants. */
1768 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1769 op_pix = s->hdsp.put_pixels_tab;
1770 op_qpix = s->dsp.put_qpel_pixels_tab;
1772 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1773 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
/* Forward MC first (put), then backward MC averages on top (avg) for
 * bidirectional prediction. */
1776 if (s->mv_dir & MV_DIR_FORWARD) {
1777 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1778 s->last_picture.f.data,
1780 op_pix = s->hdsp.avg_pixels_tab;
1781 op_qpix = s->dsp.avg_qpel_pixels_tab;
1783 if (s->mv_dir & MV_DIR_BACKWARD) {
1784 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1785 s->next_picture.f.data,
/* Inter frame/field DCT decision, this time on the prediction error
 * (ildct_cmp[0], source vs prediction). */
1789 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1790 int progressive_score, interlaced_score;
1792 s->interlaced_dct = 0;
1793 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1796 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1797 ptr_y + wrap_y * 8, wrap_y,
1800 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1801 progressive_score -= 400;
1803 if (progressive_score > 0) {
1804 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1807 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1811 if (progressive_score > interlaced_score) {
1812 s->interlaced_dct = 1;
1814 dct_offset = wrap_y;
1816 if (s->chroma_format == CHROMA_422)
/* Residual = source - prediction, per 8x8 block. */
1822 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1823 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1824 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1825 dest_y + dct_offset, wrap_y);
1826 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1827 dest_y + dct_offset + 8, wrap_y);
1829 if (s->flags & CODEC_FLAG_GRAY) {
1833 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1834 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1835 if (!s->chroma_y_shift) { /* 422 */
1836 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1837 dest_cb + (dct_offset >> 1), wrap_c);
1838 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1839 dest_cr + (dct_offset >> 1), wrap_c);
1842 /* pre quantization */
/* Low-variance MBs: mark blocks whose SAD vs the prediction is below
 * 20*qscale as skip_dct candidates (the skip assignments are elided). */
1843 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1844 2 * s->qscale * s->qscale) {
1846 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1847 wrap_y, 8) < 20 * s->qscale)
1849 if (s->dsp.sad[1](NULL, ptr_y + 8,
1850 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1852 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1853 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1855 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1856 dest_y + dct_offset + 8,
1857 wrap_y, 8) < 20 * s->qscale)
1859 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1860 wrap_c, 8) < 20 * s->qscale)
1862 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1863 wrap_c, 8) < 20 * s->qscale)
1865 if (!s->chroma_y_shift) { /* 422 */
1866 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1867 dest_cb + (dct_offset >> 1),
1868 wrap_c, 8) < 20 * s->qscale)
1870 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1871 dest_cr + (dct_offset >> 1),
1872 wrap_c, 8) < 20 * s->qscale)
/* Noise shaping: perceptual weights per block + keep a copy of the
 * unquantized coefficients for dct_quantize_refine() below. */
1878 if (s->quantizer_noise_shaping) {
1880 get_visual_weight(weight[0], ptr_y , wrap_y);
1882 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1884 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1886 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1888 get_visual_weight(weight[4], ptr_cb , wrap_c);
1890 get_visual_weight(weight[5], ptr_cr , wrap_c);
1891 if (!s->chroma_y_shift) { /* 422 */
1893 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1896 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
/* blocks are contiguous, so one memcpy covers all of them. */
1899 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1902 /* DCT & quantize */
1903 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1905 for (i = 0; i < mb_block_count; i++) {
1908 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1909 // FIXME we could decide to change to quantizer instead of
1911 // JS: I don't think that would be a good idea it could lower
1912 // quality instead of improve it. Just INTRADC clipping
1913 // deserves changes in quantizer
1915 clip_coeffs(s, s->block[i], s->block_last_index[i]);
/* skip_dct blocks are marked as empty (elided condition above). */
1917 s->block_last_index[i] = -1;
1919 if (s->quantizer_noise_shaping) {
1920 for (i = 0; i < mb_block_count; i++) {
1922 s->block_last_index[i] =
1923 dct_quantize_refine(s, s->block[i], weight[i],
1924 orig[i], i, s->qscale);
/* Single-coefficient elimination: luma blocks 0-3, chroma 4+. */
1929 if (s->luma_elim_threshold && !s->mb_intra)
1930 for (i = 0; i < 4; i++)
1931 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1932 if (s->chroma_elim_threshold && !s->mb_intra)
1933 for (i = 4; i < mb_block_count; i++)
1934 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1936 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1937 for (i = 0; i < mb_block_count; i++) {
1938 if (s->block_last_index[i] == -1)
1939 s->coded_score[i] = INT_MAX / 256;
/* Gray intra MBs still need neutral (mid-gray 1024) chroma DC values. */
1944 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1945 s->block_last_index[4] =
1946 s->block_last_index[5] = 0;
1948 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1951 // non c quantize code returns incorrect block_last_index FIXME
/* Recompute last_index by scanning backwards for the last nonzero coeff. */
1952 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1953 for (i = 0; i < mb_block_count; i++) {
1955 if (s->block_last_index[i] > 0) {
1956 for (j = 63; j > 0; j--) {
1957 if (s->block[i][s->intra_scantable.permutated[j]])
1960 s->block_last_index[i] = j;
1965 /* huffman encode */
1966 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1967 case AV_CODEC_ID_MPEG1VIDEO:
1968 case AV_CODEC_ID_MPEG2VIDEO:
1969 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1970 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1972 case AV_CODEC_ID_MPEG4:
1973 if (CONFIG_MPEG4_ENCODER)
1974 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1976 case AV_CODEC_ID_MSMPEG4V2:
1977 case AV_CODEC_ID_MSMPEG4V3:
1978 case AV_CODEC_ID_WMV1:
1979 if (CONFIG_MSMPEG4_ENCODER)
1980 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1982 case AV_CODEC_ID_WMV2:
1983 if (CONFIG_WMV2_ENCODER)
1984 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1986 case AV_CODEC_ID_H261:
1987 if (CONFIG_H261_ENCODER)
1988 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1990 case AV_CODEC_ID_H263:
1991 case AV_CODEC_ID_H263P:
1992 case AV_CODEC_ID_FLV1:
1993 case AV_CODEC_ID_RV10:
1994 case AV_CODEC_ID_RV20:
1995 if (CONFIG_H263_ENCODER)
1996 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
1998 case AV_CODEC_ID_MJPEG:
1999 if (CONFIG_MJPEG_ENCODER)
2000 ff_mjpeg_encode_mb(s, s->block);
2007 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2009 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2010 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/* Snapshot the entropy-coding-relevant encoder state from s into d before a
 * trial encode (used by encode_mb_hq for RD macroblock mode decision).
 * Mirrors copy_context_after_encode below — keep the two in sync.
 * NOTE(review): decimated — loop headers (e.g. over last_dc[]) and some
 * copies are in elided lines. */
2013 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MV prediction state. */
2016 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2019 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (per component; loop header elided). */
2021 d->last_dc[i] = s->last_dc[i];
/* Per-frame bit/statistics counters. */
2024 d->mv_bits= s->mv_bits;
2025 d->i_tex_bits= s->i_tex_bits;
2026 d->p_tex_bits= s->p_tex_bits;
2027 d->i_count= s->i_count;
2028 d->f_count= s->f_count;
2029 d->b_count= s->b_count;
2030 d->skip_count= s->skip_count;
2031 d->misc_bits= s->misc_bits;
2035 d->qscale= s->qscale;
2036 d->dquant= s->dquant;
2038 d->esc3_level_length= s->esc3_level_length;
/* Copy the encoder state from s back into d after a trial encode — the
 * superset of copy_context_before_encode plus the MB decision outputs
 * (mv, mb_intra, mv_type, ...) and the bit writers for data partitioning.
 * NOTE(review): decimated — loop headers and some copies are elided. */
2041 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MV state, including the chosen MVs of this MB. */
2044 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2045 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2048 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header elided). */
2050 d->last_dc[i] = s->last_dc[i];
/* Per-frame bit/statistics counters. */
2053 d->mv_bits= s->mv_bits;
2054 d->i_tex_bits= s->i_tex_bits;
2055 d->p_tex_bits= s->p_tex_bits;
2056 d->i_count= s->i_count;
2057 d->f_count= s->f_count;
2058 d->b_count= s->b_count;
2059 d->skip_count= s->skip_count;
2060 d->misc_bits= s->misc_bits;
/* MB decision results of the trial encode. */
2062 d->mb_intra= s->mb_intra;
2063 d->mb_skipped= s->mb_skipped;
2064 d->mv_type= s->mv_type;
2065 d->mv_dir= s->mv_dir;
/* Partitioned bitstreams carry separate texture writers. */
2067 if(s->data_partitioning){
2069 d->tex_pb= s->tex_pb;
/* Per-block coded state (loop header elided). */
2073 d->block_last_index[i]= s->block_last_index[i];
2074 d->interlaced_dct= s->interlaced_dct;
2075 d->qscale= s->qscale;
2077 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one MB with candidate mode `type` for RD mode decision:
 * restore state from `backup`, encode into the scratch bit buffer selected
 * by *next_block, score the result (bits, or bits*lambda2 + SSE in full RD
 * mode), and if it beats *dmin, record it into `best`.
 * NOTE(review): decimated — the dmin comparison/next_block toggle and parts
 * of the RD branch are in elided lines. */
2080 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2081 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2082 int *dmin, int *next_block, int motion_x, int motion_y)
2085 uint8_t *dest_backup[3];
2087 copy_context_before_encode(s, backup, type);
/* Double-buffered trial outputs: block coefficients and bit writers. */
2089 s->block= s->blocks[*next_block];
2090 s->pb= pb[*next_block];
2091 if(s->data_partitioning){
2092 s->pb2 = pb2 [*next_block];
2093 s->tex_pb= tex_pb[*next_block];
/* Redirect reconstruction into the RD scratchpad so the real frame is
 * untouched during the trial. */
2097 memcpy(dest_backup, s->dest, sizeof(s->dest));
2098 s->dest[0] = s->rd_scratchpad;
2099 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2100 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2101 assert(s->linesize >= 32); //FIXME
2104 encode_mb(s, motion_x, motion_y);
2106 score= put_bits_count(&s->pb);
2107 if(s->data_partitioning){
2108 score+= put_bits_count(&s->pb2);
2109 score+= put_bits_count(&s->tex_pb);
/* Full RD: decode the MB and add the distortion term. */
2112 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2113 ff_MPV_decode_mb(s, s->block);
2115 score *= s->lambda2;
2116 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2120 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* Winner (condition elided): keep this mode's state as the best so far. */
2127 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two pixel regions. Uses the optimized DSP
 * routines for the common 16x16 and 8x8 sizes, otherwise a generic loop
 * over the (offset) square lookup table.
 * NOTE(review): decimated — the 16x16 `if`, the loop headers and the
 * acc declaration/return are in elided lines. */
2131 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* +256 so the table can be indexed with negative differences. */
2132 uint32_t *sq = ff_squareTbl + 256;
2137 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2138 else if(w==8 && h==8)
2139 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2143 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current macroblock: compare source (new_picture) vs
 * reconstruction (s->dest) for luma + both chroma planes. Full 16x16 MBs
 * use the fast NSSE/SSE DSP paths; edge MBs (clipped w/h) fall back to the
 * generic sse() helper. */
2152 static int sse_mb(MpegEncContext *s){
/* Clip MB size at the right/bottom picture border. */
2156 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2157 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* Full-size MB: noise-preserving SSE if selected as mb_cmp, else plain SSE. */
2160 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2161 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2162 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2163 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2165 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2166 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2167 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* Border MB: generic path, chroma at half resolution (4:2:0 assumption here). */
2170 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2171 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2172 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker for the pre-pass motion estimation: iterates its MB
 * row range bottom-up / right-to-left and runs the cheap pre-estimator per
 * MB (used to seed the real ME pass). */
2175 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2176 MpegEncContext *s= *(void**)arg;
2180 s->me.dia_size= s->avctx->pre_dia_size;
2181 s->first_slice_line=1;
/* Reverse scan order: bottom row of this slice first. */
2182 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2183 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2184 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2186 s->first_slice_line=0;
/* Slice-thread worker for the main motion-estimation pass: walks the MB
 * rows of its slice in scan order and runs B- or P-frame ME per MB,
 * storing MVs and mb_type in the context. */
2194 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2195 MpegEncContext *s= *(void**)arg;
2197 ff_check_alignment();
2199 s->me.dia_size= s->avctx->dia_size;
2200 s->first_slice_line=1;
2201 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2202 s->mb_x=0; //for block init below
2203 ff_init_block_index(s);
2204 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* Advance the 4 luma block indices by one MB (2 blocks wide). */
2205 s->block_index[0]+=2;
2206 s->block_index[1]+=2;
2207 s->block_index[2]+=2;
2208 s->block_index[3]+=2;
2210 /* compute motion vector & mb_type and store in context */
2211 if(s->pict_type==AV_PICTURE_TYPE_B)
2212 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2214 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2216 s->first_slice_line=0;
/* Slice-thread worker computing per-MB spatial statistics on the source
 * luma: variance (mb_var) and mean (mb_mean) per 16x16 block, plus an
 * accumulated variance sum for rate control.
 * NOTE(review): decimated — xx/yy setup and the return are elided. */
2221 static int mb_var_thread(AVCodecContext *c, void *arg){
2222 MpegEncContext *s= *(void**)arg;
2225 ff_check_alignment();
2227 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2228 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2231 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2233 int sum = s->dsp.pix_sum(pix, s->linesize);
/* var = E[x^2] - E[x]^2 over 256 pixels; +500+128 rounds the >>8 results. */
2235 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2237 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2238 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2239 s->me.mb_var_sum_temp += varc;
/* Finish the current slice: merge MPEG-4 data partitions and write
 * stuffing (MPEG-4) or MJPEG stuffing, byte-align and flush the bit
 * writer, and account the alignment bits in pass-1 stats. */
2245 static void write_slice_end(MpegEncContext *s){
2246 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2247 if(s->partitioned_frame){
2248 ff_mpeg4_merge_partitions(s);
2251 ff_mpeg4_stuffing(&s->pb);
2252 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2253 ff_mjpeg_encode_stuffing(&s->pb);
2256 avpriv_align_put_bits(&s->pb);
2257 flush_put_bits(&s->pb);
/* Alignment/stuffing bits count as misc in two-pass statistics. */
2259 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2260 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte H.263 MB-info record to the packet side data:
 * bit offset of the MB, quantizer, GOB number, MB address, and the MV
 * predictors (4MV unsupported, second pair written as 0). The layout must
 * match the AV_PKT_DATA_H263_MB_INFO consumer. */
2263 static void write_mb_info(MpegEncContext *s)
/* mb_info_size was already advanced by update_mb_info(); back up 12 bytes
 * to the slot reserved for this record. */
2265 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2266 int offset = put_bits_count(&s->pb);
2267 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2268 int gobn = s->mb_y / s->gob_index;
2270 if (CONFIG_H263_ENCODER)
2271 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2272 bytestream_put_le32(&ptr, offset);
2273 bytestream_put_byte(&ptr, s->qscale);
2274 bytestream_put_byte(&ptr, gobn);
2275 bytestream_put_le16(&ptr, mba);
2276 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2277 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2278 /* 4MV not implemented */
2279 bytestream_put_byte(&ptr, 0); /* hmv2 */
2280 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track where MB-info records are due. Called before each MB (startcode=0)
 * and after writing a resync/start code (startcode=1): reserves a new
 * 12-byte record whenever s->mb_info bytes have been written since the
 * last record, and re-anchors the bookkeeping at start codes.
 * NOTE(review): decimated — the early return / write_mb_info() call sites
 * are in elided lines. */
2283 static void update_mb_info(MpegEncContext *s, int startcode)
/* One record every s->mb_info output bytes. */
2287 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2288 s->mb_info_size += 12;
2289 s->prev_mb_info = s->last_mb_info;
2292 s->prev_mb_info = put_bits_count(&s->pb)/8;
2293 /* This might have incremented mb_info_size above, and we return without
2294 * actually writing any info into that slot yet. But in that case,
2295 * this will be called again at the start of the after writing the
2296 * start code, actually writing the mb info. */
2300 s->last_mb_info = put_bits_count(&s->pb)/8;
2301 if (!s->mb_info_size)
2302 s->mb_info_size += 12;
2306 static int encode_thread(AVCodecContext *c, void *arg){
2307 MpegEncContext *s= *(void**)arg;
2308 int mb_x, mb_y, pdif = 0;
2309 int chr_h= 16>>s->chroma_y_shift;
2311 MpegEncContext best_s, backup_s;
2312 uint8_t bit_buf[2][MAX_MB_BYTES];
2313 uint8_t bit_buf2[2][MAX_MB_BYTES];
2314 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2315 PutBitContext pb[2], pb2[2], tex_pb[2];
2317 ff_check_alignment();
2320 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2321 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2322 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2325 s->last_bits= put_bits_count(&s->pb);
2336 /* init last dc values */
2337 /* note: quant matrix value (8) is implied here */
2338 s->last_dc[i] = 128 << s->intra_dc_precision;
2340 s->current_picture.f.error[i] = 0;
2343 memset(s->last_mv, 0, sizeof(s->last_mv));
2347 switch(s->codec_id){
2348 case AV_CODEC_ID_H263:
2349 case AV_CODEC_ID_H263P:
2350 case AV_CODEC_ID_FLV1:
2351 if (CONFIG_H263_ENCODER)
2352 s->gob_index = ff_h263_get_gob_height(s);
2354 case AV_CODEC_ID_MPEG4:
2355 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2356 ff_mpeg4_init_partitions(s);
2362 s->first_slice_line = 1;
2363 s->ptr_lastgob = s->pb.buf;
2364 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2368 ff_set_qscale(s, s->qscale);
2369 ff_init_block_index(s);
2371 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2372 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2373 int mb_type= s->mb_type[xy];
2378 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2379 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2382 if(s->data_partitioning){
2383 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2384 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2385 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2391 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2392 ff_update_block_index(s);
2394 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2395 ff_h261_reorder_mb_index(s);
2396 xy= s->mb_y*s->mb_stride + s->mb_x;
2397 mb_type= s->mb_type[xy];
2400 /* write gob / video packet header */
2402 int current_packet_size, is_gob_start;
2404 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2406 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2408 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2410 switch(s->codec_id){
2411 case AV_CODEC_ID_H263:
2412 case AV_CODEC_ID_H263P:
2413 if(!s->h263_slice_structured)
2414 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2416 case AV_CODEC_ID_MPEG2VIDEO:
2417 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2418 case AV_CODEC_ID_MPEG1VIDEO:
2419 if(s->mb_skip_run) is_gob_start=0;
2424 if(s->start_mb_y != mb_y || mb_x!=0){
2427 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2428 ff_mpeg4_init_partitions(s);
2432 assert((put_bits_count(&s->pb)&7) == 0);
2433 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2435 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2436 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2437 int d = 100 / s->error_rate;
2439 current_packet_size=0;
2440 s->pb.buf_ptr= s->ptr_lastgob;
2441 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2445 if (s->avctx->rtp_callback){
2446 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2447 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2449 update_mb_info(s, 1);
2451 switch(s->codec_id){
2452 case AV_CODEC_ID_MPEG4:
2453 if (CONFIG_MPEG4_ENCODER) {
2454 ff_mpeg4_encode_video_packet_header(s);
2455 ff_mpeg4_clean_buffers(s);
2458 case AV_CODEC_ID_MPEG1VIDEO:
2459 case AV_CODEC_ID_MPEG2VIDEO:
2460 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2461 ff_mpeg1_encode_slice_header(s);
2462 ff_mpeg1_clean_buffers(s);
2465 case AV_CODEC_ID_H263:
2466 case AV_CODEC_ID_H263P:
2467 if (CONFIG_H263_ENCODER)
2468 ff_h263_encode_gob_header(s, mb_y);
2472 if(s->flags&CODEC_FLAG_PASS1){
2473 int bits= put_bits_count(&s->pb);
2474 s->misc_bits+= bits - s->last_bits;
2478 s->ptr_lastgob += current_packet_size;
2479 s->first_slice_line=1;
2480 s->resync_mb_x=mb_x;
2481 s->resync_mb_y=mb_y;
2485 if( (s->resync_mb_x == s->mb_x)
2486 && s->resync_mb_y+1 == s->mb_y){
2487 s->first_slice_line=0;
2491 s->dquant=0; //only for QP_RD
2493 update_mb_info(s, 0);
2495 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2497 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2499 copy_context_before_encode(&backup_s, s, -1);
2501 best_s.data_partitioning= s->data_partitioning;
2502 best_s.partitioned_frame= s->partitioned_frame;
2503 if(s->data_partitioning){
2504 backup_s.pb2= s->pb2;
2505 backup_s.tex_pb= s->tex_pb;
2508 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2509 s->mv_dir = MV_DIR_FORWARD;
2510 s->mv_type = MV_TYPE_16X16;
2512 s->mv[0][0][0] = s->p_mv_table[xy][0];
2513 s->mv[0][0][1] = s->p_mv_table[xy][1];
2514 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2515 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2517 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2518 s->mv_dir = MV_DIR_FORWARD;
2519 s->mv_type = MV_TYPE_FIELD;
2522 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2523 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2524 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2526 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2527 &dmin, &next_block, 0, 0);
2529 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2530 s->mv_dir = MV_DIR_FORWARD;
2531 s->mv_type = MV_TYPE_16X16;
2535 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2536 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2538 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2539 s->mv_dir = MV_DIR_FORWARD;
2540 s->mv_type = MV_TYPE_8X8;
2543 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2544 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2546 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2547 &dmin, &next_block, 0, 0);
2549 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2550 s->mv_dir = MV_DIR_FORWARD;
2551 s->mv_type = MV_TYPE_16X16;
2553 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2554 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2555 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2556 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2558 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2559 s->mv_dir = MV_DIR_BACKWARD;
2560 s->mv_type = MV_TYPE_16X16;
2562 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2563 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2564 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2565 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2567 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2568 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2569 s->mv_type = MV_TYPE_16X16;
2571 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2572 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2573 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2574 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2575 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2576 &dmin, &next_block, 0, 0);
2578 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2579 s->mv_dir = MV_DIR_FORWARD;
2580 s->mv_type = MV_TYPE_FIELD;
2583 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2584 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2585 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2587 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2588 &dmin, &next_block, 0, 0);
2590 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2591 s->mv_dir = MV_DIR_BACKWARD;
2592 s->mv_type = MV_TYPE_FIELD;
2595 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2596 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2597 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2599 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2600 &dmin, &next_block, 0, 0);
2602 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2603 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2604 s->mv_type = MV_TYPE_FIELD;
2606 for(dir=0; dir<2; dir++){
2608 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2609 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2610 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2613 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2614 &dmin, &next_block, 0, 0);
2616 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2618 s->mv_type = MV_TYPE_16X16;
2622 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2623 &dmin, &next_block, 0, 0);
2624 if(s->h263_pred || s->h263_aic){
2626 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2628 ff_clean_intra_table_entries(s); //old mode?
2632 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2633 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2634 const int last_qp= backup_s.qscale;
2637 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2638 static const int dquant_tab[4]={-1,1,-2,2};
2640 assert(backup_s.dquant == 0);
2643 s->mv_dir= best_s.mv_dir;
2644 s->mv_type = MV_TYPE_16X16;
2645 s->mb_intra= best_s.mb_intra;
2646 s->mv[0][0][0] = best_s.mv[0][0][0];
2647 s->mv[0][0][1] = best_s.mv[0][0][1];
2648 s->mv[1][0][0] = best_s.mv[1][0][0];
2649 s->mv[1][0][1] = best_s.mv[1][0][1];
2651 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2652 for(; qpi<4; qpi++){
2653 int dquant= dquant_tab[qpi];
2654 qp= last_qp + dquant;
2655 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2657 backup_s.dquant= dquant;
2658 if(s->mb_intra && s->dc_val[0]){
2660 dc[i]= s->dc_val[0][ s->block_index[i] ];
2661 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2665 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2666 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2667 if(best_s.qscale != qp){
2668 if(s->mb_intra && s->dc_val[0]){
2670 s->dc_val[0][ s->block_index[i] ]= dc[i];
2671 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2678 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2679 int mx= s->b_direct_mv_table[xy][0];
2680 int my= s->b_direct_mv_table[xy][1];
2682 backup_s.dquant = 0;
2683 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2685 ff_mpeg4_set_direct_mv(s, mx, my);
2686 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2687 &dmin, &next_block, mx, my);
2689 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2690 backup_s.dquant = 0;
2691 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2693 ff_mpeg4_set_direct_mv(s, 0, 0);
2694 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2695 &dmin, &next_block, 0, 0);
2697 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2700 coded |= s->block_last_index[i];
2703 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2704 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2705 mx=my=0; //FIXME find the one we actually used
2706 ff_mpeg4_set_direct_mv(s, mx, my);
2707 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2715 s->mv_dir= best_s.mv_dir;
2716 s->mv_type = best_s.mv_type;
2718 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2719 s->mv[0][0][1] = best_s.mv[0][0][1];
2720 s->mv[1][0][0] = best_s.mv[1][0][0];
2721 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2724 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2725 &dmin, &next_block, mx, my);
2730 s->current_picture.qscale_table[xy] = best_s.qscale;
2732 copy_context_after_encode(s, &best_s, -1);
2734 pb_bits_count= put_bits_count(&s->pb);
2735 flush_put_bits(&s->pb);
2736 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2739 if(s->data_partitioning){
2740 pb2_bits_count= put_bits_count(&s->pb2);
2741 flush_put_bits(&s->pb2);
2742 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2743 s->pb2= backup_s.pb2;
2745 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2746 flush_put_bits(&s->tex_pb);
2747 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2748 s->tex_pb= backup_s.tex_pb;
2750 s->last_bits= put_bits_count(&s->pb);
2752 if (CONFIG_H263_ENCODER &&
2753 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2754 ff_h263_update_motion_val(s);
2756 if(next_block==0){ //FIXME 16 vs linesize16
2757 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2758 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2759 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2762 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2763 ff_MPV_decode_mb(s, s->block);
2765 int motion_x = 0, motion_y = 0;
2766 s->mv_type=MV_TYPE_16X16;
2767 // only one MB-Type possible
2770 case CANDIDATE_MB_TYPE_INTRA:
2773 motion_x= s->mv[0][0][0] = 0;
2774 motion_y= s->mv[0][0][1] = 0;
2776 case CANDIDATE_MB_TYPE_INTER:
2777 s->mv_dir = MV_DIR_FORWARD;
2779 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2780 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2782 case CANDIDATE_MB_TYPE_INTER_I:
2783 s->mv_dir = MV_DIR_FORWARD;
2784 s->mv_type = MV_TYPE_FIELD;
2787 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2788 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2789 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2792 case CANDIDATE_MB_TYPE_INTER4V:
2793 s->mv_dir = MV_DIR_FORWARD;
2794 s->mv_type = MV_TYPE_8X8;
2797 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2798 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2801 case CANDIDATE_MB_TYPE_DIRECT:
2802 if (CONFIG_MPEG4_ENCODER) {
2803 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2805 motion_x=s->b_direct_mv_table[xy][0];
2806 motion_y=s->b_direct_mv_table[xy][1];
2807 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2810 case CANDIDATE_MB_TYPE_DIRECT0:
2811 if (CONFIG_MPEG4_ENCODER) {
2812 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2814 ff_mpeg4_set_direct_mv(s, 0, 0);
2817 case CANDIDATE_MB_TYPE_BIDIR:
2818 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2820 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2821 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2822 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2823 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2825 case CANDIDATE_MB_TYPE_BACKWARD:
2826 s->mv_dir = MV_DIR_BACKWARD;
2828 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2829 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2831 case CANDIDATE_MB_TYPE_FORWARD:
2832 s->mv_dir = MV_DIR_FORWARD;
2834 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2835 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2837 case CANDIDATE_MB_TYPE_FORWARD_I:
2838 s->mv_dir = MV_DIR_FORWARD;
2839 s->mv_type = MV_TYPE_FIELD;
2842 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2843 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2844 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2847 case CANDIDATE_MB_TYPE_BACKWARD_I:
2848 s->mv_dir = MV_DIR_BACKWARD;
2849 s->mv_type = MV_TYPE_FIELD;
2852 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2853 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2854 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2857 case CANDIDATE_MB_TYPE_BIDIR_I:
2858 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2859 s->mv_type = MV_TYPE_FIELD;
2861 for(dir=0; dir<2; dir++){
2863 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2864 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2865 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2870 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2873 encode_mb(s, motion_x, motion_y);
2875 // RAL: Update last macroblock type
2876 s->last_mv_dir = s->mv_dir;
2878 if (CONFIG_H263_ENCODER &&
2879 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2880 ff_h263_update_motion_val(s);
2882 ff_MPV_decode_mb(s, s->block);
2885 /* clean the MV table in IPS frames for direct mode in B frames */
2886 if(s->mb_intra /* && I,P,S_TYPE */){
2887 s->p_mv_table[xy][0]=0;
2888 s->p_mv_table[xy][1]=0;
2891 if(s->flags&CODEC_FLAG_PSNR){
2895 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2896 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2898 s->current_picture.f.error[0] += sse(
2899 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2900 s->dest[0], w, h, s->linesize);
2901 s->current_picture.f.error[1] += sse(
2902 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2903 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2904 s->current_picture.f.error[2] += sse(
2905 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2906 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2909 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2910 ff_h263_loop_filter(s);
2912 av_dlog(s->avctx, "MB %d %d bits\n",
2913 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2917 //not beautiful here but we must write it before flushing so it has to be here
2918 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2919 ff_msmpeg4_encode_ext_header(s);
2923 /* Send the last GOB if RTP */
2924 if (s->avctx->rtp_callback) {
2925 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2926 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2927 /* Call the RTP callback to send the last GOB */
2929 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* Fold a per-slice-thread accumulator field into the main context and zero it
 * in the source, so repeated merges do not double-count.
 * NOTE(review): two statements without a do{}while(0) wrapper — only safe when
 * used as a full statement, which is how the callers below use it. */
2935 #define MERGE(field) dst->field += src->field; src->field=0
/* Merge motion-estimation statistics from a slice-thread context (src) back
 * into the master context (dst) after the parallel ME pass; src's counters
 * are reset by MERGE(). Called from encode_picture() for each extra context.
 * NOTE(review): source extract is missing the closing line(s) of this
 * function (original line 2940+ elided). */
2936 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2937 MERGE(me.scene_change_score);
2938 MERGE(me.mc_mb_var_sum_temp);
2939 MERGE(me.mb_var_sum_temp);
/* Merge per-slice-thread encoding state back into the master context after
 * the parallel encode pass: bit/statistics counters, error totals, optional
 * noise-reduction DCT error sums, and finally the slice's bitstream itself,
 * which is appended to the master PutBitContext.
 * NOTE(review): interior lines are elided in this extract (e.g. original
 * 2943-2954, loop/function closing braces); treat as incomplete. */
2942 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2945 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2946 MERGE(dct_count[1]);
2955 MERGE(er.error_count);
2956 MERGE(padding_bug_score);
/* PSNR error accumulators (one per plane). */
2957 MERGE(current_picture.f.error[0]);
2958 MERGE(current_picture.f.error[1]);
2959 MERGE(current_picture.f.error[2]);
/* DCT error sums only exist when noise reduction is enabled. */
2961 if(dst->avctx->noise_reduction){
2962 for(i=0; i<64; i++){
2963 MERGE(dct_error_sum[0][i]);
2964 MERGE(dct_error_sum[1][i]);
/* Both bitstreams must be byte-aligned before concatenation. */
2968 assert(put_bits_count(&src->pb) % 8 ==0);
2969 assert(put_bits_count(&dst->pb) % 8 ==0);
2970 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2971 flush_put_bits(&dst->pb);
/* Choose the picture quality (lambda/qscale) for the current frame.
 * Priority: an explicitly queued next_lambda, else the rate controller
 * (unless fixed qscale). With adaptive quantization, per-MB qscales are
 * cleaned up codec-specifically and the qscale table initialized; lambda
 * is then taken from the table, otherwise from the picture quality.
 * dry_run: when non-zero, estimate without consuming next_lambda.
 * Returns: presumably negative on rate-control failure — the visible
 * `quality < 0` check's return statement is elided in this extract.
 * NOTE(review): several lines (returns, braces, case terminators) are
 * missing between the visible ones; treat as incomplete. */
2974 static int estimate_qp(MpegEncContext *s, int dry_run){
2975 if (s->next_lambda){
2976 s->current_picture_ptr->f.quality =
2977 s->current_picture.f.quality = s->next_lambda;
2978 if(!dry_run) s->next_lambda= 0;
2979 } else if (!s->fixed_qscale) {
2980 s->current_picture_ptr->f.quality =
2981 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2982 if (s->current_picture.f.quality < 0)
2986 if(s->adaptive_quant){
2987 switch(s->codec_id){
2988 case AV_CODEC_ID_MPEG4:
2989 if (CONFIG_MPEG4_ENCODER)
2990 ff_clean_mpeg4_qscales(s);
2992 case AV_CODEC_ID_H263:
2993 case AV_CODEC_ID_H263P:
2994 case AV_CODEC_ID_FLV1:
2995 if (CONFIG_H263_ENCODER)
2996 ff_clean_h263_qscales(s);
2999 ff_init_qscale_tab(s);
3002 s->lambda= s->lambda_table[0];
3005 s->lambda = s->current_picture.f.quality;
3010 /* must be called before writing the header */
/* Derive temporal distances from the current picture's pts:
 * - B frames: pb_time = distance from the previous non-B frame.
 * - other frames: pp_time = distance between the two most recent non-B
 *   frames, and last_non_b_time is advanced.
 * The asserts document the invariants: pts must be set, and a B frame must
 * lie strictly between its references. */
3011 static void set_frame_distances(MpegEncContext * s){
3012 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3013 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3015 if(s->pict_type==AV_PICTURE_TYPE_B){
3016 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3017 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3019 s->pp_time= s->time - s->last_non_b_time;
3020 s->last_non_b_time= s->time;
3021 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: runs motion estimation across all slice contexts,
 * decides picture type (scene-change promotion to I), selects f_code/b_code
 * and clamps long MVs, estimates qscale, writes the codec-specific picture
 * header, then runs the per-slice encode threads and merges their results.
 * Returns presumably 0 on success / negative on error — the return sites
 * are elided in this extract.
 * NOTE(review): many interior lines (case labels, braces, returns) are
 * missing between the visible ones; this body is not compilable as shown. */
3025 static int encode_picture(MpegEncContext *s, int picture_number)
3029 int context_count = s->slice_context_count;
3031 s->picture_number = picture_number;
3033 /* Reset the average MB variance */
3034 s->me.mb_var_sum_temp =
3035 s->me.mc_mb_var_sum_temp = 0;
3037 /* we need to initialize some time vars before we can encode b-frames */
3038 // RAL: Condition added for MPEG1VIDEO
3039 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3040 set_frame_distances(s);
3041 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3042 ff_set_mpeg4_time(s);
3044 s->me.scene_change_score=0;
3046 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: I frames reset it; non-B frames may flip-flop it so
 * halfpel rounding errors do not accumulate over successive P frames. */
3048 if(s->pict_type==AV_PICTURE_TYPE_I){
3049 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3050 else s->no_rounding=0;
3051 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3052 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3053 s->no_rounding ^= 1;
/* Two-pass: take qp/fcode from the stats file; otherwise reuse the last
 * lambda for this picture type unless a fixed qscale was requested. */
3056 if(s->flags & CODEC_FLAG_PASS2){
3057 if (estimate_qp(s,1) < 0)
3059 ff_get_2pass_fcode(s);
3060 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3061 if(s->pict_type==AV_PICTURE_TYPE_B)
3062 s->lambda= s->last_lambda_for[s->pict_type];
3064 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3068 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Clone master state into each slice-thread context before the ME pass. */
3069 for(i=1; i<context_count; i++){
3070 ret = ff_update_duplicate_context(s->thread_context[i], s);
3078 /* Estimate motion for every MB */
3079 if(s->pict_type != AV_PICTURE_TYPE_I){
3080 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3081 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3082 if (s->pict_type != AV_PICTURE_TYPE_B) {
3083 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3084 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3088 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3089 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3091 for(i=0; i<s->mb_stride*s->mb_height; i++)
3092 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3094 if(!s->fixed_qscale){
3095 /* finding spatial complexity for I-frame rate control */
3096 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
/* Collect the ME statistics produced by the slice threads. */
3099 for(i=1; i<context_count; i++){
3100 merge_context_after_me(s, s->thread_context[i]);
3102 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3103 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change: promote a P frame to I and mark every MB intra. */
3106 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3107 s->pict_type= AV_PICTURE_TYPE_I;
3108 for(i=0; i<s->mb_stride*s->mb_height; i++)
3109 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3110 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3111 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick the smallest f_code covering the MV range, then clip
 * MVs that still exceed it (frame and, if enabled, field tables). */
3115 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3116 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3118 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3120 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3121 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3122 s->f_code= FFMAX3(s->f_code, a, b);
3125 ff_fix_long_p_mvs(s);
3126 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3127 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3131 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3132 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: f_code from forward tables, b_code from backward tables. */
3137 if(s->pict_type==AV_PICTURE_TYPE_B){
3140 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3141 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3142 s->f_code = FFMAX(a, b);
3144 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3145 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3146 s->b_code = FFMAX(a, b);
3148 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3149 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3150 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3151 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3152 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3154 for(dir=0; dir<2; dir++){
3157 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3158 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3159 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3160 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) qscale estimation for this frame. */
3168 if (estimate_qp(s, 0) < 0)
3171 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3172 s->qscale= 3; //reduce clipping problems
3174 if (s->out_format == FMT_MJPEG) {
3175 /* for mjpeg, we do include qscale in the matrix */
3177 int j= s->dsp.idct_permutation[i];
3179 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3181 s->y_dc_scale_table=
3182 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3183 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3184 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3185 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3189 //FIXME var duplication
3190 s->current_picture_ptr->f.key_frame =
3191 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3192 s->current_picture_ptr->f.pict_type =
3193 s->current_picture.f.pict_type = s->pict_type;
3195 if (s->current_picture.f.key_frame)
3196 s->picture_in_gop_number=0;
/* Write the codec-specific picture header and record its bit cost.
 * NOTE(review): the switch's case labels are elided in this extract. */
3198 s->last_bits= put_bits_count(&s->pb);
3199 switch(s->out_format) {
3201 if (CONFIG_MJPEG_ENCODER)
3202 ff_mjpeg_encode_picture_header(s);
3205 if (CONFIG_H261_ENCODER)
3206 ff_h261_encode_picture_header(s, picture_number);
3209 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3210 ff_wmv2_encode_picture_header(s, picture_number);
3211 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3212 ff_msmpeg4_encode_picture_header(s, picture_number);
3213 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3214 ff_mpeg4_encode_picture_header(s, picture_number);
3215 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3216 ff_rv10_encode_picture_header(s, picture_number);
3217 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3218 ff_rv20_encode_picture_header(s, picture_number);
3219 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3220 ff_flv_encode_picture_header(s, picture_number);
3221 else if (CONFIG_H263_ENCODER)
3222 ff_h263_encode_picture_header(s, picture_number);
3225 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3226 ff_mpeg1_encode_picture_header(s, picture_number);
3231 bits= put_bits_count(&s->pb);
3232 s->header_bits= bits - s->last_bits;
/* Sync slice contexts after ME, run the encode threads, merge results. */
3234 for(i=1; i<context_count; i++){
3235 update_duplicate_context_after_me(s->thread_context[i], s);
3237 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3238 for(i=1; i<context_count; i++){
3239 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction: accumulate each coefficient's magnitude into
 * dct_error_sum (per intra/inter class) and shrink it toward zero by the
 * running dct_offset, clamping so a coefficient never crosses zero.
 * The elided branch structure presumably splits on the coefficient's sign
 * (level>0 vs level<0) — confirm against the full source.
 * NOTE(review): several lines (branch headers, stores, braces) are missing
 * in this extract. */
3245 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3246 const int intra= s->mb_intra;
3249 s->dct_count[intra]++;
3251 for(i=0; i<64; i++){
3252 int level= block[i];
/* Positive-side path: record magnitude, subtract offset, clamp at 0. */
3256 s->dct_error_sum[intra][i] += level;
3257 level -= s->dct_offset[intra][i];
3258 if(level<0) level=0;
/* Negative-side path: mirror of the above. */
3260 s->dct_error_sum[intra][i] -= level;
3261 level += s->dct_offset[intra][i];
3262 if(level>0) level=0;
/* Rate-distortion optimal ("trellis") quantization of one 8x8 block:
 * forward DCT, per-coefficient candidate levels, then a Viterbi-style
 * search over (run,level) paths minimizing distortion + lambda*bits.
 * block: coefficients in/out; n: block index; qscale: quantizer;
 * overflow: out-flag set when a level exceeds s->max_qcoeff.
 * Returns the index of the last non-zero coefficient (or -1).
 * NOTE(review): this extract is missing many interleaved lines
 * (declarations such as coeff[]/survivor[]/score_tab[], branch headers,
 * braces); it is documented as-is but is not compilable as shown. */
3269 static int dct_quantize_trellis_c(MpegEncContext *s,
3270 int16_t *block, int n,
3271 int qscale, int *overflow){
3273 const uint8_t *scantable= s->intra_scantable.scantable;
3274 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3276 unsigned int threshold1, threshold2;
3288 int coeff_count[64];
3289 int qmul, qadd, start_i, last_non_zero, i, dc;
3290 const int esc_length= s->ac_esc_length;
3292 uint8_t * last_length;
/* Lagrange multiplier rescaled so bit costs and distortions are comparable. */
3293 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3295 s->dsp.fdct (block);
3297 if(s->dct_error_sum)
3298 s->denoise_dct(s, block);
3300 qadd= ((qscale-1)|1)*8;
3311 /* For AIC we skip quant/dequant of INTRADC */
3316 /* note: block[0] is assumed to be positive */
3317 block[0] = (block[0] + (q >> 1)) / q;
/* Intra path: intra quant matrix and intra AC VLC length tables;
 * MPEG-style quant gets a rounding bias. */
3320 qmat = s->q_intra_matrix[qscale];
3321 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3322 bias= 1<<(QMAT_SHIFT-1);
3323 length = s->intra_ac_vlc_length;
3324 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter matrix and inter VLC tables. */
3328 qmat = s->q_inter_matrix[qscale];
3329 length = s->inter_ac_vlc_length;
3330 last_length= s->inter_ac_vlc_last_length;
3334 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3335 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that quantizes non-zero. */
3337 for(i=63; i>=start_i; i--) {
3338 const int j = scantable[i];
3339 int level = block[j] * qmat[j];
3341 if(((unsigned)(level+threshold1))>threshold2){
/* Build up to two candidate levels per coefficient (level and level-1,
 * sign handled symmetrically); below threshold only +/-1 is considered. */
3347 for(i=start_i; i<=last_non_zero; i++) {
3348 const int j = scantable[i];
3349 int level = block[j] * qmat[j];
3351 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3352 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3353 if(((unsigned)(level+threshold1))>threshold2){
3355 level= (bias + level)>>QMAT_SHIFT;
3357 coeff[1][i]= level-1;
3358 // coeff[2][k]= level-2;
3360 level= (bias - level)>>QMAT_SHIFT;
3361 coeff[0][i]= -level;
3362 coeff[1][i]= -level+1;
3363 // coeff[2][k]= -level+2;
3365 coeff_count[i]= FFMIN(level, 2);
3366 assert(coeff_count[i]);
3369 coeff[0][i]= (level>>31)|1;
3374 *overflow= s->max_qcoeff < max; //overflow might have happened
/* Nothing survives quantization: clear the block and bail out. */
3376 if(last_non_zero < start_i){
3377 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3378 return last_non_zero;
3379 }
/* Trellis search proper: for each scan position try every candidate level
 * against every surviving predecessor (run length = gap), scoring
 * distortion (vs the unquantized, inverse-quantized value) plus VLC bits. */
3381 score_tab[start_i]= 0;
3382 survivor[0]= start_i;
3385 for(i=start_i; i<=last_non_zero; i++){
3386 int level_index, j, zero_distortion;
3387 int dct_coeff= FFABS(block[ scantable[i] ]);
3388 int best_score=256*256*256*120;
/* ifast fDCT leaves AAN scaling in the coefficients; undo it here. */
3390 if (s->dsp.fdct == ff_fdct_ifast)
3391 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3392 zero_distortion= dct_coeff*dct_coeff;
3394 for(level_index=0; level_index < coeff_count[i]; level_index++){
3396 int level= coeff[level_index][i];
3397 const int alevel= FFABS(level);
/* Reconstruct the dequantized value per the active standard's rule. */
3402 if(s->out_format == FMT_H263){
3403 unquant_coeff= alevel*qmul + qadd;
3405 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3407 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3408 unquant_coeff = (unquant_coeff - 1) | 1;
3410 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3411 unquant_coeff = (unquant_coeff - 1) | 1;
3416 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Small levels: exact VLC length table; otherwise use escape-code cost. */
3418 if((level&(~127)) == 0){
3419 for(j=survivor_count-1; j>=0; j--){
3420 int run= i - survivor[j];
3421 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3422 score += score_tab[i-run];
3424 if(score < best_score){
3427 level_tab[i+1]= level-64;
/* H.263: also track the best "this is the last coefficient" path,
 * which uses the separate last-coef VLC table. */
3431 if(s->out_format == FMT_H263){
3432 for(j=survivor_count-1; j>=0; j--){
3433 int run= i - survivor[j];
3434 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3435 score += score_tab[i-run];
3436 if(score < last_score){
3439 last_level= level-64;
3445 distortion += esc_length*lambda;
3446 for(j=survivor_count-1; j>=0; j--){
3447 int run= i - survivor[j];
3448 int score= distortion + score_tab[i-run];
3450 if(score < best_score){
3453 level_tab[i+1]= level-64;
3457 if(s->out_format == FMT_H263){
3458 for(j=survivor_count-1; j>=0; j--){
3459 int run= i - survivor[j];
3460 int score= distortion + score_tab[i-run];
3461 if(score < last_score){
3464 last_level= level-64;
3472 score_tab[i+1]= best_score;
/* Prune survivors that can no longer win; looser bound for short blocks. */
3474 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3475 if(last_non_zero <= 27){
3476 for(; survivor_count; survivor_count--){
3477 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3481 for(; survivor_count; survivor_count--){
3482 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3487 survivor[ survivor_count++ ]= i+1;
/* Non-H.263: pick the best termination point after the search. */
3490 if(s->out_format != FMT_H263){
3491 last_score= 256*256*256*120;
3492 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3493 int score= score_tab[i];
3494 if(i) score += lambda*2; //FIXME exacter?
3496 if(score < last_score){
3499 last_level= level_tab[i];
3500 last_run= run_tab[i];
3505 s->coded_score[n] = last_score;
3507 dc= FFABS(block[0]);
3508 last_non_zero= last_i - 1;
3509 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3511 if(last_non_zero < start_i)
3512 return last_non_zero;
/* Special case: only the first coefficient survives — re-decide its level
 * by direct RD comparison against coding nothing at all. */
3514 if(last_non_zero == 0 && start_i == 0){
3516 int best_score= dc * dc;
3518 for(i=0; i<coeff_count[0]; i++){
3519 int level= coeff[i][0];
3520 int alevel= FFABS(level);
3521 int unquant_coeff, score, distortion;
3523 if(s->out_format == FMT_H263){
3524 unquant_coeff= (alevel*qmul + qadd)>>3;
3526 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3527 unquant_coeff = (unquant_coeff - 1) | 1;
3529 unquant_coeff = (unquant_coeff + 4) >> 3;
3530 unquant_coeff<<= 3 + 3;
3532 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3534 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3535 else score= distortion + esc_length*lambda;
3537 if(score < best_score){
3539 best_level= level - 64;
3542 block[0]= best_level;
3543 s->coded_score[n] = best_score - dc*dc;
3544 if(best_level == 0) return -1;
3545 else return last_non_zero;
/* Backtrack the winning path, writing levels back in permuted scan order. */
3551 block[ perm_scantable[last_non_zero] ]= last_level;
3554 for(; i>start_i; i -= run_tab[i] + 1){
3555 block[ perm_scantable[i-1] ]= level_tab[i];
3558 return last_non_zero;
/* Optional instrumentation switch for dct_quantize_refine(); off by default. */
3561 //#define REFINE_STATS 1
/* DCT basis images (one 8x8 pattern per coefficient, fixed-point), filled
 * lazily by build_basis() on first use of dct_quantize_refine(). */
3562 static int16_t basis[64][64];
/* Fill the fixed-point DCT basis table: entry [perm[index]][8*x+y] holds the
 * (i,j) 2-D DCT-III cosine basis sample at pixel (x,y), scaled by
 * 0.25*(1<<BASIS_SHIFT) with the usual 1/sqrt(2) factor on the DC row/column.
 * perm: IDCT coefficient permutation mapping natural order to storage order.
 * NOTE(review): the four nested loops over i, j, x, y (and the definition
 * of `index`) are elided in this extract. */
3564 static void build_basis(uint8_t *perm){
3571 double s= 0.25*(1<<BASIS_SHIFT);
3573 int perm_index= perm[index];
3574 if(i==0) s*= sqrt(0.5);
3575 if(j==0) s*= sqrt(0.5);
3576 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/*
 * Iterative refinement of an already-quantized 8x8 block (quantizer noise
 * shaping).  Repeatedly tries +/-1 changes to individual coefficients,
 * scoring each candidate as the VLC rate delta (from the length /
 * last_length tables) plus the lambda-weighted distortion change computed by
 * dsp.try_8x8basis() against the residual rem[], and keeps the best change
 * per pass.  Returns the (possibly updated) index of the last nonzero
 * coefficient of the block.
 * NOTE(review): many lines of this function are elided in this view; the
 * comments below describe only what the visible code establishes.
 */
3583 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3584 int16_t *block, int16_t *weight, int16_t *orig,
3587 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3588 const uint8_t *scantable= s->intra_scantable.scantable;
3589 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3590 // unsigned int threshold1, threshold2;
3595 int qmul, qadd, start_i, last_non_zero, i, dc;
3597 uint8_t * last_length;
3599 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Statistics counters — presumably only compiled/printed under REFINE_STATS
 * (see the commented-out "#define REFINE_STATS 1" above); confirm in full file. */
3602 static int after_last=0;
3603 static int to_zero=0;
3604 static int from_zero=0;
3607 static int messed_sign=0;
/* Lazily build the shared DCT basis table on first use. */
3610 if(basis[0][0] == 0)
3611 build_basis(s->dsp.idct_permutation);
3622 /* For AIC we skip quant/dequant of INTRADC */
3626 q <<= RECON_SHIFT-3;
3627 /* note: block[0] is assumed to be positive */
3629 // block[0] = (block[0] + (q >> 1)) / q;
3631 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3632 // bias= 1<<(QMAT_SHIFT-1);
/* Select intra vs inter AC VLC bit-length tables for rate estimation. */
3633 length = s->intra_ac_vlc_length;
3634 last_length= s->intra_ac_vlc_last_length;
3638 length = s->inter_ac_vlc_length;
3639 last_length= s->inter_ac_vlc_last_length;
/* Current last nonzero coefficient (scan order) of this block. */
3641 last_non_zero = s->block_last_index[n];
/* rem[] starts as the negated original (RECON_SHIFT fixed point, rounded);
 * the dequantized coefficients are added back below, so rem[] ends up as the
 * reconstruction error that try_8x8basis() scores against. */
3646 dc += (1<<(RECON_SHIFT-1));
3647 for(i=0; i<64; i++){
3648 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3651 STOP_TIMER("memset rem[]")}
/* Derive per-coefficient noise-shaping weights; result range is 16..63. */
3654 for(i=0; i<64; i++){
3659 w= FFABS(weight[i]) + qns*one;
3660 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3663 // w=weight[i] = (63*qns + (w/2)) / w;
3669 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Replay the currently quantized coefficients into rem[] (via the basis
 * table) and record each coefficient's zero-run in run_tab[]. */
3675 for(i=start_i; i<=last_non_zero; i++){
3676 int j= perm_scantable[i];
3677 const int level= block[j];
3681 if(level<0) coeff= qmul*level - qadd;
3682 else coeff= qmul*level + qadd;
3683 run_tab[rle_index++]=run;
3686 s->dsp.add_8x8basis(rem, basis[j], coeff);
3692 if(last_non_zero>0){
3693 STOP_TIMER("init rem[]")
/* Baseline score: distortion of the block with no change applied. */
3700 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3703 int run2, best_unquant_change=0, analyze_gradient;
3707 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Optionally precompute a gradient d1[]; below it is used to reject +/-1
 * candidates whose sign disagrees with the residual's gradient. */
3709 if(analyze_gradient){
3713 for(i=0; i<64; i++){
3716 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3719 STOP_TIMER("rem*w*w")}
/* Try +/-1 on the intra DC coefficient (distortion-only; DC rate is fixed). */
3729 const int level= block[0];
3730 int change, old_coeff;
3732 assert(s->mb_intra);
3736 for(change=-1; change<=1; change+=2){
3737 int new_level= level + change;
3738 int score, new_coeff;
3740 new_coeff= q*new_level;
/* Reject candidates whose dequantized DC leaves the representable range. */
3741 if(new_coeff >= 2048 || new_coeff < 0)
3744 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3745 if(score<best_score){
3748 best_change= change;
3749 best_unquant_change= new_coeff - old_coeff;
3756 run2= run_tab[rle_index++];
/* Main loop: try +/-1 on every AC position (possibly one past last_non_zero,
 * see the noise_shaping<3 early-out below). */
3760 for(i=start_i; i<64; i++){
3761 int j= perm_scantable[i];
3762 const int level= block[j];
3763 int change, old_coeff;
3765 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3769 if(level<0) old_coeff= qmul*level - qadd;
3770 else old_coeff= qmul*level + qadd;
3771 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3775 assert(run2>=0 || i >= last_non_zero );
3778 for(change=-1; change<=1; change+=2){
3779 int new_level= level + change;
3780 int score, new_coeff, unquant_change;
/* At low noise-shaping levels only allow moves toward zero. */
3783 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3787 if(new_level<0) new_coeff= qmul*new_level - qadd;
3788 else new_coeff= qmul*new_level + qadd;
3789 if(new_coeff >= 2048 || new_coeff <= -2048)
3791 //FIXME check for overflow
/* Case 1: coefficient stays nonzero — rate delta is a simple table lookup
 * (level is within the +/-63 range the UNI tables cover). */
3794 if(level < 63 && level > -63){
3795 if(i < last_non_zero)
3796 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3797 - length[UNI_AC_ENC_INDEX(run, level+64)];
3799 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3800 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Case 2: coefficient goes from zero to +/-1 — runs split, so the rate
 * delta involves the following nonzero coefficient too. */
3803 assert(FFABS(new_level)==1);
3805 if(analyze_gradient){
3806 int g= d1[ scantable[i] ];
/* Skip the candidate if its sign disagrees with the residual gradient. */
3807 if(g && (g^new_level) >= 0)
3811 if(i < last_non_zero){
3812 int next_i= i + run2 + 1;
3813 int next_level= block[ perm_scantable[next_i] ] + 64;
3815 if(next_level&(~127))
3818 if(next_i < last_non_zero)
3819 score += length[UNI_AC_ENC_INDEX(run, 65)]
3820 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3821 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3823 score += length[UNI_AC_ENC_INDEX(run, 65)]
3824 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3825 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3827 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3829 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3830 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Case 3: coefficient goes from +/-1 to zero — runs merge; mirror of case 2. */
3836 assert(FFABS(level)==1);
3838 if(i < last_non_zero){
3839 int next_i= i + run2 + 1;
3840 int next_level= block[ perm_scantable[next_i] ] + 64;
3842 if(next_level&(~127))
3845 if(next_i < last_non_zero)
3846 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3847 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3848 - length[UNI_AC_ENC_INDEX(run, 65)];
3850 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3851 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3852 - length[UNI_AC_ENC_INDEX(run, 65)];
3854 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3856 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3857 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Add the lambda-weighted distortion delta and keep the best candidate. */
3864 unquant_change= new_coeff - old_coeff;
3865 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3867 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3868 if(score<best_score){
3871 best_change= change;
3872 best_unquant_change= unquant_change;
3876 prev_level= level + 64;
3877 if(prev_level&(~127))
3886 STOP_TIMER("iterative step")}
/* Apply the best +/-1 change found in this pass to the block. */
3890 int j= perm_scantable[ best_coeff ];
3892 block[j] += best_change;
3894 if(best_coeff > last_non_zero){
3895 last_non_zero= best_coeff;
3903 if(block[j] - best_change){
3904 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* Shrink last_non_zero past any trailing zeros the change created. */
3916 for(; last_non_zero>=start_i; last_non_zero--){
3917 if(block[perm_scantable[last_non_zero]])
/* REFINE_STATS reporting — periodic printf of the counters above. */
3923 if(256*256*256*64 % count == 0){
3924 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab[] for the next iteration and fold the accepted change
 * into the residual rem[]. */
3929 for(i=start_i; i<=last_non_zero; i++){
3930 int j= perm_scantable[i];
3931 const int level= block[j];
3934 run_tab[rle_index++]=run;
3941 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3947 if(last_non_zero>0){
3948 STOP_TIMER("iterative search")
3953 return last_non_zero;
/*
 * Reference C implementation of forward DCT + quantization for one 8x8 block.
 * Runs the fdct, optionally denoises, quantizes each coefficient with the
 * per-qscale matrix and rounding bias, and finally permutes the nonzero
 * coefficients to match the IDCT's coefficient order.
 * Returns the scan-order index of the last nonzero coefficient; *overflow is
 * set when a quantized level may exceed s->max_qcoeff.
 * NOTE(review): several lines (e.g. the j = scantable[i] assignments and the
 * intra/inter branch headers) are elided in this view.
 */
3956 int ff_dct_quantize_c(MpegEncContext *s,
3957 int16_t *block, int n,
3958 int qscale, int *overflow)
3960 int i, j, level, last_non_zero, q, start_i;
3962 const uint8_t *scantable= s->intra_scantable.scantable;
3965 unsigned int threshold1, threshold2;
/* Forward DCT in place, then optional denoising driven by the accumulated
 * DCT error statistics. */
3967 s->dsp.fdct (block);
3969 if(s->dct_error_sum)
3970 s->denoise_dct(s, block);
3980 /* For AIC we skip quant/dequant of INTRADC */
3983 /* note: block[0] is assumed to be positive */
/* Intra DC is quantized by simple rounded division. */
3984 block[0] = (block[0] + (q >> 1)) / q;
/* Select the intra vs inter quant matrix and rounding bias (rescaled from
 * QUANT_BIAS_SHIFT to QMAT_SHIFT precision). */
3987 qmat = s->q_intra_matrix[qscale];
3988 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3992 qmat = s->q_inter_matrix[qscale];
3993 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3995 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3996 threshold2= (threshold1<<1);
/* First pass, scanning backwards: find the last coefficient that survives
 * quantization.  (unsigned)(level+threshold1) > threshold2 is a single-
 * compare test for |level| > threshold1. */
3997 for(i=63;i>=start_i;i--) {
3999 level = block[j] * qmat[j];
4001 if(((unsigned)(level+threshold1))>threshold2){
/* Second pass, forwards up to last_non_zero: quantize each coefficient,
 * zeroing those below the dead-zone threshold. */
4008 for(i=start_i; i<=last_non_zero; i++) {
4010 level = block[j] * qmat[j];
4012 // if( bias+level >= (1<<QMAT_SHIFT)
4013 // || bias-level >= (1<<QMAT_SHIFT)){
4014 if(((unsigned)(level+threshold1))>threshold2){
4016 level= (bias + level)>>QMAT_SHIFT;
4019 level= (bias - level)>>QMAT_SHIFT;
4027 *overflow= s->max_qcoeff < max; //overflow might have happened
4029 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4030 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4031 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4033 return last_non_zero;
/* Helpers for the AVOption tables below: OFFSET() locates a field inside
 * MpegEncContext; VE marks an option as a video encoding parameter. */
4036 #define OFFSET(x) offsetof(MpegEncContext, x)
4037 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder (terminator and any shared common
 * options are elided in this view). */
4038 static const AVOption h263_options[] = {
4039 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4040 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4041 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass tying the h263_options table to the H.263 encoder's private context. */
4046 static const AVClass h263_class = {
4047     .class_name = "H.263 encoder",
4048     .item_name  = av_default_item_name,
4049     .option     = h263_options,
4050     .version    = LIBAVUTIL_VERSION_INT,
/* Codec registration for the baseline H.263 encoder; uses the shared MPV
 * encode entry points and supports YUV 4:2:0 only. */
4053 AVCodec ff_h263_encoder = {
4055     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4056     .type           = AVMEDIA_TYPE_VIDEO,
4057     .id             = AV_CODEC_ID_H263,
4058     .priv_data_size = sizeof(MpegEncContext),
4059     .init           = ff_MPV_encode_init,
4060     .encode2        = ff_MPV_encode_picture,
4061     .close          = ff_MPV_encode_end,
4062     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4063     .priv_class     = &h263_class,
/* Private options of the H.263+ (H.263v2) encoder: unrestricted motion
 * vectors, alternative inter VLC, OBMC and structured slices (terminator
 * elided in this view). */
4066 static const AVOption h263p_options[] = {
4067 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4068 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4069 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4070 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass tying the h263p_options table to the H.263+ encoder's private context. */
4074 static const AVClass h263p_class = {
4075     .class_name = "H.263p encoder",
4076     .item_name  = av_default_item_name,
4077     .option     = h263p_options,
4078     .version    = LIBAVUTIL_VERSION_INT,
/* Codec registration for the H.263+ encoder; same MPV entry points as plain
 * H.263, additionally advertising slice-threading capability. */
4081 AVCodec ff_h263p_encoder = {
4083     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4084     .type           = AVMEDIA_TYPE_VIDEO,
4085     .id             = AV_CODEC_ID_H263P,
4086     .priv_data_size = sizeof(MpegEncContext),
4087     .init           = ff_MPV_encode_init,
4088     .encode2        = ff_MPV_encode_picture,
4089     .close          = ff_MPV_encode_end,
4090     .capabilities   = CODEC_CAP_SLICE_THREADS,
4091     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4092     .priv_class     = &h263p_class,
/* Generate the generic MPV AVClass (msmpeg4v2_class) for this encoder. */
4095 FF_MPV_GENERIC_CLASS(msmpeg4v2)
/* Codec registration for the MS-MPEG4 v2 encoder (shared MPV entry points). */
4097 AVCodec ff_msmpeg4v2_encoder = {
4098     .name           = "msmpeg4v2",
4099     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4100     .type           = AVMEDIA_TYPE_VIDEO,
4101     .id             = AV_CODEC_ID_MSMPEG4V2,
4102     .priv_data_size = sizeof(MpegEncContext),
4103     .init           = ff_MPV_encode_init,
4104     .encode2        = ff_MPV_encode_picture,
4105     .close          = ff_MPV_encode_end,
4106     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4107     .priv_class     = &msmpeg4v2_class,
/* Generate the generic MPV AVClass (msmpeg4v3_class) for this encoder. */
4110 FF_MPV_GENERIC_CLASS(msmpeg4v3)
/* Codec registration for the MS-MPEG4 v3 encoder (shared MPV entry points). */
4112 AVCodec ff_msmpeg4v3_encoder = {
4114     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4115     .type           = AVMEDIA_TYPE_VIDEO,
4116     .id             = AV_CODEC_ID_MSMPEG4V3,
4117     .priv_data_size = sizeof(MpegEncContext),
4118     .init           = ff_MPV_encode_init,
4119     .encode2        = ff_MPV_encode_picture,
4120     .close          = ff_MPV_encode_end,
4121     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4122     .priv_class     = &msmpeg4v3_class,
4125 FF_MPV_GENERIC_CLASS(wmv1)
4127 AVCodec ff_wmv1_encoder = {
4129 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4130 .type = AVMEDIA_TYPE_VIDEO,
4131 .id = AV_CODEC_ID_WMV1,
4132 .priv_data_size = sizeof(MpegEncContext),
4133 .init = ff_MPV_encode_init,
4134 .encode2 = ff_MPV_encode_picture,
4135 .close = ff_MPV_encode_end,
4136 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4137 .priv_class = &wmv1_class,