2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
39 #include "mpegvideo.h"
47 #include "aandcttab.h"
49 #include "mpeg4video.h"
51 #include "bytestream.h"
/* Forward declarations of encoder-internal helpers defined later in this file. */
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default motion-estimation tables; filled in by MPV_encode_defaults() and
 * shared across all encoder instances (write-once, so no locking needed). */
60 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
61 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table exposed to codecs built on this encoder core.
 * NOTE(review): initializer body elided in this view. */
63 const AVOption ff_mpv_generic_options[] = {
/**
 * Convert a quantization matrix into per-qscale fixed-point multiplier tables.
 *
 * For every qscale in [qmin, qmax] this fills:
 *   - qmat[qscale][i]      : high-precision reciprocal (QMAT_SHIFT fixed point)
 *   - qmat16[qscale][0..1] : 16-bit reciprocal + rounding bias pair
 *     (the 16-bit path is only filled in the generic/ifast branch below)
 *
 * The branch on dsp->fdct exists because the "ifast" DCT leaves its output
 * scaled by ff_aanscales[], so that scale must be folded into the reciprocal.
 *
 * @param quant_matrix matrix in natural order; indexed through
 *                     dsp->idct_permutation so qmat matches the DCT layout
 * @param bias  quantizer rounding bias (QUANT_BIAS_SHIFT fixed point)
 * @param intra nonzero for the intra matrix (DC coefficient, index 0,
 *              is skipped in the overflow check loop below)
 */
68 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
69 uint16_t (*qmat16)[2][64],
70 const uint16_t *quant_matrix,
71 int bias, int qmin, int qmax, int intra)
76     for (qscale = qmin; qscale <= qmax; qscale++) {
          /* Accurate integer DCTs: plain reciprocal, no extra scaling. */
78         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
79             dsp->fdct == ff_jpeg_fdct_islow_10 ||
80             dsp->fdct == ff_faandct) {
81             for (i = 0; i < 64; i++) {
82                 const int j = dsp->idct_permutation[i];
83                 /* 16 <= qscale * quant_matrix[i] <= 7905
84                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
85                  * 19952 <= x <= 249205026
86                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
87                  * 3444240 >= (1 << 36) / (x) >= 275 */
89                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90                                         (qscale * quant_matrix[j]));
           /* ifast DCT: fold the AAN post-scale factors into the reciprocal. */
92         } else if (dsp->fdct == ff_fdct_ifast) {
93             for (i = 0; i < 64; i++) {
94                 const int j = dsp->idct_permutation[i];
95                 /* 16 <= qscale * quant_matrix[i] <= 7905
96                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
97                  * 19952 <= x <= 249205026
98                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
99                  * 3444240 >= (1 << 36) / (x) >= 275 */
101                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
102                                        (ff_aanscales[i] * qscale *
            /* Generic branch (elided header in this view): also fills the
             * 16-bit multiplier/bias pair used by the MMX quantizer. */
106            for (i = 0; i < 64; i++) {
107                const int j = dsp->idct_permutation[i];
108                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109                 * Assume x = qscale * quant_matrix[i]
111                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112                 * so 32768 >= (1 << 19) / (x) >= 67 */
113                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114                                        (qscale * quant_matrix[j]));
115                //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116                //                   (qscale * quant_matrix[i]);
117                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118                                       (qscale * quant_matrix[j]);
                   /* Clamp: 0 and 128*256 are not representable for the
                    * 16-bit quantizer, so pull them just inside the range. */
120                if (qmat16[qscale][0][i] == 0 ||
121                    qmat16[qscale][0][i] == 128 * 256)
122                    qmat16[qscale][0][i] = 128 * 256 - 1;
123                qmat16[qscale][1][i] =
124                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125                                qmat16[qscale][0][i]);
        /* Overflow check: shrink the shift until max * qmat fits in int.
         * Starts at i = intra so the intra DC term is excluded. */
129        for (i = intra; i < 64; i++) {
131            if (dsp->fdct == ff_fdct_ifast) {
132                max = (8191LL * ff_aanscales[i]) >> 14;
134            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
140        av_log(NULL, AV_LOG_INFO,
141               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the integer quantizer (and lambda2) from the current lambda.
 * The 139/(1<<7) factor approximates the empirical lambda->qscale mapping;
 * the result is clamped to the user-configured [qmin, qmax] range. */
146 static inline void update_qscale(MpegEncContext *s)
148     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149                 (FF_LAMBDA_SHIFT + 7);
150     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
      /* lambda2 = lambda^2 in FF_LAMBDA_SHIFT fixed point, rounded. */
152     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quant matrix to the bitstream in zigzag scan order,
 * 8 bits per coefficient, as required by the MPEG headers. */
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
162     for (i = 0; i < 64; i++) {
163         put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
 * init s->current_picture.qscale_table from s->lambda_table
172 void ff_init_qscale_tab(MpegEncContext *s)
174     int8_t * const qscale_table = s->current_picture.qscale_table;
      /* Convert each macroblock's lambda into a clamped per-MB qscale,
       * using the same 139/(1<<7) mapping as update_qscale(). */
177     for (i = 0; i < s->mb_num; i++) {
178         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the encoder-relevant metadata fields (type, quality, numbering,
 * interlacing flags) from src to dst without touching the pixel data. */
185 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst,
188     dst->pict_type = src->pict_type;
189     dst->quality = src->quality;
190     dst->coded_picture_number = src->coded_picture_number;
191     dst->display_picture_number = src->display_picture_number;
192     //dst->reference = src->reference;
194     dst->interlaced_frame = src->interlaced_frame;
195     dst->top_field_first = src->top_field_first;
/* Sync per-frame state from the master context into a slice-thread
 * duplicate after motion estimation has run on the master. */
198 static void update_duplicate_context_after_me(MpegEncContext *dst,
201 #define COPY(a) dst->a= src->a
203     COPY(current_picture);
209     COPY(picture_in_gop_number);
210     COPY(gop_picture_number);
211     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
212     COPY(progressive_frame);    // FIXME don't set in encode_header
213     COPY(partitioned_frame);    // FIXME don't set in encode_header
 * Set the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
221 static void MPV_encode_defaults(MpegEncContext *s)
224     ff_MPV_common_defaults(s);
      /* Fill the shared static tables; safe to redo on every init since the
       * values written are always the same. */
226     for (i = -16; i < 16; i++) {
227         default_fcode_tab[i + MAX_MV] = 1;
229     s->me.mv_penalty = default_mv_penalty;
230     s->fcode_tab     = default_fcode_tab;
/* init video encoder */
/**
 * Initialize the MPEG-video family encoder: validate user options against
 * the selected codec's capabilities, derive internal state from the
 * AVCodecContext, set per-codec output format/flags, build the quantization
 * matrices, and start the rate controller.  Returns an error on any
 * unsupported option combination.
 */
234 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
236     MpegEncContext *s = avctx->priv_data;
238     int chroma_h_shift, chroma_v_shift;
240     MPV_encode_defaults(s);
    /* --- per-codec pixel format validation --- */
242     switch (avctx->codec_id) {
243     case AV_CODEC_ID_MPEG2VIDEO:
244         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
245             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
246             av_log(avctx, AV_LOG_ERROR,
247                    "only YUV420 and YUV422 are supported\n");
251     case AV_CODEC_ID_LJPEG:
        /* LJPEG additionally allows BGRA and, when unofficial compliance is
         * permitted, the non-J (limited range) YUV variants. */
252         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
253             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
254             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
255             avctx->pix_fmt != AV_PIX_FMT_BGRA &&
256             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
257               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
258               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
259              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
260             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
264     case AV_CODEC_ID_MJPEG:
265         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
266             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
267             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
268               avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
269              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
270             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
        /* default case (header elided): everything else is 420-only */
275         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
276             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
281     switch (avctx->pix_fmt) {
282     case AV_PIX_FMT_YUVJ422P:
283     case AV_PIX_FMT_YUV422P:
284         s->chroma_format = CHROMA_422;
286     case AV_PIX_FMT_YUVJ420P:
287     case AV_PIX_FMT_YUV420P:
289         s->chroma_format = CHROMA_420;
    /* --- copy basic user parameters into the internal context --- */
293     s->bit_rate = avctx->bit_rate;
294     s->width    = avctx->width;
295     s->height   = avctx->height;
296     if (avctx->gop_size > 600 &&
297         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
298         av_log(avctx, AV_LOG_ERROR,
299                "Warning keyframe interval too large! reducing it ...\n");
300         avctx->gop_size = 600;
302     s->gop_size     = avctx->gop_size;
304     s->flags        = avctx->flags;
305     s->flags2       = avctx->flags2;
306     s->max_b_frames = avctx->max_b_frames;
307     s->codec_id     = avctx->codec->id;
308     s->strict_std_compliance = avctx->strict_std_compliance;
309     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
310     s->mpeg_quant         = avctx->mpeg_quant;
311     s->rtp_mode           = !!avctx->rtp_payload_size;
312     s->intra_dc_precision = avctx->intra_dc_precision;
313     s->user_specified_pts = AV_NOPTS_VALUE;
315     if (s->gop_size <= 1) {
322     s->me_method = avctx->me_method;
325     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
    /* Adaptive quantization is needed if any masking option or QP-RD is on. */
327     s->adaptive_quant = (s->avctx->lumi_masking ||
328                          s->avctx->dark_masking ||
329                          s->avctx->temporal_cplx_masking ||
330                          s->avctx->spatial_cplx_masking ||
331                          s->avctx->p_masking ||
332                          s->avctx->border_masking ||
333                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
336     s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
    /* --- rate-control parameter sanity checks --- */
338     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
339         av_log(avctx, AV_LOG_ERROR,
340                "a vbv buffer size is needed, "
341                "for encoding with a maximum bitrate\n");
345     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
346         av_log(avctx, AV_LOG_INFO,
347                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
350     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
351         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
355     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
356         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
360     if (avctx->rc_max_rate &&
361         avctx->rc_max_rate == avctx->bit_rate &&
362         avctx->rc_max_rate != avctx->rc_min_rate) {
363         av_log(avctx, AV_LOG_INFO,
364                "impossible bitrate constraints, this will fail\n");
367     if (avctx->rc_buffer_size &&
368         avctx->bit_rate * (int64_t)avctx->time_base.num >
369             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
370         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
374     if (!s->fixed_qscale &&
375         avctx->bit_rate * av_q2d(avctx->time_base) >
376             avctx->bit_rate_tolerance) {
377         av_log(avctx, AV_LOG_ERROR,
378                "bitrate tolerance too small for bitrate\n");
    /* CBR MPEG-1/2: a vbv_delay larger than 0xFFFE 90kHz ticks cannot be
     * coded, so warn that it will be clamped to the VBR escape value. */
382     if (s->avctx->rc_max_rate &&
383         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
384         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
385          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
386         90000LL * (avctx->rc_buffer_size - 1) >
387             s->avctx->rc_max_rate * 0xFFFFLL) {
388         av_log(avctx, AV_LOG_INFO,
389                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
390                "specified vbv buffer is too large for the given bitrate!\n");
    /* --- feature/codec compatibility checks --- */
393     if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
394         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
395         s->codec_id != AV_CODEC_ID_FLV1) {
396         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
400     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
401         av_log(avctx, AV_LOG_ERROR,
402                "OBMC is only supported with simple mb decision\n");
406     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
407         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
411     if (s->max_b_frames &&
412         s->codec_id != AV_CODEC_ID_MPEG4 &&
413         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
414         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
415         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
419     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
420          s->codec_id == AV_CODEC_ID_H263  ||
421          s->codec_id == AV_CODEC_ID_H263P) &&
422         (avctx->sample_aspect_ratio.num > 255 ||
423          avctx->sample_aspect_ratio.den > 255)) {
424         av_log(avctx, AV_LOG_ERROR,
425                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
426                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
430     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
431         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
432         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
436     // FIXME mpeg2 uses that too
437     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
438         av_log(avctx, AV_LOG_ERROR,
439                "mpeg2 style quantization not supported by codec\n");
443     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
444         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
448     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
449         s->avctx->mb_decision != FF_MB_DECISION_RD) {
450         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
454     if (s->avctx->scenechange_threshold < 1000000000 &&
455         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
456         av_log(avctx, AV_LOG_ERROR,
457                "closed gop with scene change detection are not supported yet, "
458                "set threshold to 1000000000\n");
462     if (s->flags & CODEC_FLAG_LOW_DELAY) {
463         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
464             av_log(avctx, AV_LOG_ERROR,
465                    "low delay forcing is only available for mpeg2\n");
468         if (s->max_b_frames != 0) {
469             av_log(avctx, AV_LOG_ERROR,
470                    "b frames cannot be used with low delay\n");
475     if (s->q_scale_type == 1) {
476         if (avctx->qmax > 12) {
477             av_log(avctx, AV_LOG_ERROR,
478                    "non linear quant only supports qmax <= 12 currently\n");
    /* --- threading constraints --- */
483     if (s->avctx->thread_count > 1 &&
484         s->codec_id != AV_CODEC_ID_MPEG4 &&
485         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
486         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
487         (s->codec_id != AV_CODEC_ID_H263P)) {
488         av_log(avctx, AV_LOG_ERROR,
489                "multi threaded encoding not supported by codec\n");
493     if (s->avctx->thread_count < 1) {
494         av_log(avctx, AV_LOG_ERROR,
495                "automatic thread number detection not supported by codec,"
500     if (s->avctx->thread_count > 1)
503     if (!avctx->time_base.den || !avctx->time_base.num) {
504         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
508     i = (INT_MAX / 2 + 128) >> 8;
509     if (avctx->mb_threshold >= i) {
510         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
515     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
516         av_log(avctx, AV_LOG_INFO,
517                "notice: b_frame_strategy only affects the first pass\n");
518         avctx->b_frame_strategy = 0;
    /* Reduce the framerate fraction so the timebase fits codec limits. */
521     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
523         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
524         avctx->time_base.den /= i;
525         avctx->time_base.num /= i;
    /* --- quantizer bias defaults, overridable from the AVCodecContext --- */
529     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
530         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
531         // (a + x * 3 / 8) / x
532         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
533         s->inter_quant_bias = 0;
535         s->intra_quant_bias = 0;
537         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
540     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
541         s->intra_quant_bias = avctx->intra_quant_bias;
542     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
543         s->inter_quant_bias = avctx->inter_quant_bias;
545     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
    /* MPEG-4 stores the timebase denominator in a 16-bit field. */
548     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
549         s->avctx->time_base.den > (1 << 16) - 1) {
550         av_log(avctx, AV_LOG_ERROR,
551                "timebase %d/%d not supported by MPEG 4 standard, "
552                "the maximum admitted value for the timebase denominator "
553                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
557     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
    /* --- per-codec output format and codec-specific state --- */
559     switch (avctx->codec->id) {
560     case AV_CODEC_ID_MPEG1VIDEO:
561         s->out_format = FMT_MPEG1;
562         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
563         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
565     case AV_CODEC_ID_MPEG2VIDEO:
566         s->out_format = FMT_MPEG1;
567         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
568         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
571     case AV_CODEC_ID_LJPEG:
572     case AV_CODEC_ID_MJPEG:
573         s->out_format = FMT_MJPEG;
574         s->intra_only = 1; /* force intra only for jpeg */
575         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
576             avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
577             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
578             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
579             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
            /* else branch (header elided): derive JPEG sampling factors from
             * the chroma subsampling shifts computed above. */
581             s->mjpeg_vsample[0] = 2;
582             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
583             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
584             s->mjpeg_hsample[0] = 2;
585             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
586             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
588         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
589             ff_mjpeg_encode_init(s) < 0)
594     case AV_CODEC_ID_H261:
595         if (!CONFIG_H261_ENCODER)
597         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
598             av_log(avctx, AV_LOG_ERROR,
599                    "The specified picture size of %dx%d is not valid for the "
600                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
601                     s->width, s->height);
604         s->out_format = FMT_H261;
608     case AV_CODEC_ID_H263:
609         if (!CONFIG_H263_ENCODER)
611         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
612                              s->width, s->height) == 8) {
613             av_log(avctx, AV_LOG_INFO,
614                    "The specified picture size of %dx%d is not valid for "
615                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
616                    "352x288, 704x576, and 1408x1152."
617                    "Try H.263+.\n", s->width, s->height);
620         s->out_format = FMT_H263;
624     case AV_CODEC_ID_H263P:
625         s->out_format = FMT_H263;
628         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
629         s->modified_quant  = s->h263_aic;
630         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
631         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
634         /* These are just to be sure */
638     case AV_CODEC_ID_FLV1:
639         s->out_format      = FMT_H263;
640         s->h263_flv        = 2; /* format = 1; 11-bit codes */
641         s->unrestricted_mv = 1;
642         s->rtp_mode  = 0; /* don't allow GOB */
646     case AV_CODEC_ID_RV10:
647         s->out_format = FMT_H263;
651     case AV_CODEC_ID_RV20:
652         s->out_format      = FMT_H263;
655         s->modified_quant  = 1;
659         s->unrestricted_mv = 0;
661     case AV_CODEC_ID_MPEG4:
662         s->out_format      = FMT_H263;
664         s->unrestricted_mv = 1;
665         s->low_delay       = s->max_b_frames ? 0 : 1;
666         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
668     case AV_CODEC_ID_MSMPEG4V2:
669         s->out_format      = FMT_H263;
671         s->unrestricted_mv = 1;
672         s->msmpeg4_version = 2;
676     case AV_CODEC_ID_MSMPEG4V3:
677         s->out_format        = FMT_H263;
679         s->unrestricted_mv   = 1;
680         s->msmpeg4_version   = 3;
681         s->flipflop_rounding = 1;
685     case AV_CODEC_ID_WMV1:
686         s->out_format        = FMT_H263;
688         s->unrestricted_mv   = 1;
689         s->msmpeg4_version   = 4;
690         s->flipflop_rounding = 1;
694     case AV_CODEC_ID_WMV2:
695         s->out_format        = FMT_H263;
697         s->unrestricted_mv   = 1;
698         s->msmpeg4_version   = 5;
699         s->flipflop_rounding = 1;
707     avctx->has_b_frames = !s->low_delay;
711     s->progressive_frame    =
712     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
713                                                 CODEC_FLAG_INTERLACED_ME) ||
    /* --- allocate common state and pick quantizer implementations --- */
717     if (ff_MPV_common_init(s) < 0)
721         ff_MPV_encode_init_x86(s);
723     if (!s->dct_quantize)
724         s->dct_quantize = ff_dct_quantize_c;
726         s->denoise_dct  = denoise_dct_c;
727     s->fast_dct_quantize = s->dct_quantize;
        /* trellis quantization replaces the default when enabled (condition
         * line elided in this view) */
729         s->dct_quantize  = dct_quantize_trellis_c;
731     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
732         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
734     s->quant_precision = 5;
736     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
737     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
739     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
740         ff_h261_encode_init(s);
741     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
742         ff_h263_encode_init(s);
743     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
744         ff_msmpeg4_encode_init(s);
745     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
746         && s->out_format == FMT_MPEG1)
747         ff_mpeg1_encode_init(s);
    /* --- select default quant matrices, permuted for the chosen IDCT --- */
750     for (i = 0; i < 64; i++) {
751         int j = s->dsp.idct_permutation[i];
752         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
754             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
755             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
756         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
758             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
761             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
762             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        /* user-supplied matrices override the defaults */
764         if (s->avctx->intra_matrix)
765             s->intra_matrix[j] = s->avctx->intra_matrix[i];
766         if (s->avctx->inter_matrix)
767             s->inter_matrix[j] = s->avctx->inter_matrix[i];
770     /* precompute matrix */
771     /* for mjpeg, we do include qscale in the matrix */
772     if (s->out_format != FMT_MJPEG) {
773         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
774                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
776         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
777                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
781     if (ff_rate_control_init(s) < 0)
/* Tear down the encoder: stop the rate controller, free the common state,
 * close the (L)JPEG sub-encoder if it was used, and free the extradata. */
787 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
789     MpegEncContext *s = avctx->priv_data;
791     ff_rate_control_uninit(s);
793     ff_MPV_common_end(s);
794     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
795         s->out_format == FMT_MJPEG)
796         ff_mjpeg_encode_close(s);
798     av_freep(&avctx->extradata);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (typically the block mean); used as a flatness measure. */
803 static int get_sae(uint8_t *src, int ref, int stride)
808     for (y = 0; y < 16; y++) {
809         for (x = 0; x < 16; x++) {
810             acc += FFABS(src[x + y * stride] - ref);
/* Count how many 16x16 blocks of the frame would be cheaper to code intra:
 * a block votes "intra" when its deviation from its own mean (sae + margin)
 * is smaller than its SAD against the reference frame. */
817 static int get_intra_count(MpegEncContext *s, uint8_t *src,
818                            uint8_t *ref, int stride)
826     for (y = 0; y < h; y += 16) {
827         for (x = 0; x < w; x += 16) {
828             int offset = x + y * stride;
829             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
831             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
832             int sae  = get_sae(src + offset, mean, stride);
            /* 500 is an empirical bias towards inter coding */
834             acc += sae + 500 < sad;
/**
 * Take a user-supplied frame into the encoder's input queue: validate or
 * synthesize its pts, decide whether the frame buffer can be referenced
 * directly or must be copied into an internal picture, and append it to
 * s->input_picture[] at the configured encoding delay.
 */
841 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
845     int i, display_picture_number = 0, ret;
846     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
847                                                  (s->low_delay ? 0 : 1);
852         display_picture_number = s->input_picture_number++;
    /* pts checking: must be strictly monotonic when the user supplies it */
854         if (pts != AV_NOPTS_VALUE) {
855             if (s->user_specified_pts != AV_NOPTS_VALUE) {
857                 int64_t last = s->user_specified_pts;
860                     av_log(s->avctx, AV_LOG_ERROR,
861                            "Error, Invalid timestamp=%"PRId64", "
862                            "last=%"PRId64"\n", pts, s->user_specified_pts);
866                 if (!s->low_delay && display_picture_number == 1)
867                     s->dts_delta = time - last;
869             s->user_specified_pts = pts;
            /* no pts given: guess by extrapolating from the previous one */
871             if (s->user_specified_pts != AV_NOPTS_VALUE) {
872                 s->user_specified_pts =
873                 pts = s->user_specified_pts + 1;
874                 av_log(s->avctx, AV_LOG_INFO,
875                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
878                 pts = display_picture_number;
    /* direct-reference eligibility checks: the strides must match ours.
     * NOTE(review): the trailing ';' on the next line gives the if() an
     * empty body, so the statement that followed it (elided in this view,
     * presumably "direct = 0;") would run unconditionally — almost certainly
     * it should read "if (!pic_arg->buf[0])" with no semicolon; confirm
     * against the upstream source. */
884         if (!pic_arg->buf[0]);
886         if (pic_arg->linesize[0] != s->linesize)
888         if (pic_arg->linesize[1] != s->uvlinesize)
890         if (pic_arg->linesize[2] != s->uvlinesize)
893         av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
894                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
        /* direct path: reference the caller's buffer without copying */
897             i = ff_find_unused_picture(s, 1);
901             pic = &s->picture[i];
904             if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
906             if (ff_alloc_picture(s, pic, 1) < 0) {
        /* copy path: allocate an internal picture and memcpy plane data */
910             i = ff_find_unused_picture(s, 0);
914             pic = &s->picture[i];
917             if (ff_alloc_picture(s, pic, 0) < 0) {
921             if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
922                 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
923                 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
926                 int h_chroma_shift, v_chroma_shift;
927                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
931                 for (i = 0; i < 3; i++) {
932                     int src_stride = pic_arg->linesize[i];
933                     int dst_stride = i ? s->uvlinesize : s->linesize;
934                     int h_shift = i ? h_chroma_shift : 0;
935                     int v_shift = i ? v_chroma_shift : 0;
936                     int w = s->width  >> h_shift;
937                     int h = s->height >> v_shift;
938                     uint8_t *src = pic_arg->data[i];
939                     uint8_t *dst = pic->f.data[i];
941                     if (!s->avctx->rc_buffer_size)
942                         dst += INPLACE_OFFSET;
944                     if (src_stride == dst_stride)
945                         memcpy(dst, src, src_stride * h);
956         copy_picture_attributes(s, &pic->f, pic_arg);
957         pic->f.display_picture_number = display_picture_number;
958         pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
961     /* shift buffer entries */
962     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
963         s->input_picture[i - 1] = s->input_picture[i];
965     s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to ref to be skipped entirely.
 * Compares all three planes 8x8 block by block with frame_skip_cmp and
 * accumulates a score according to frame_skip_exp; returns the skip verdict
 * based on frame_skip_threshold / frame_skip_factor. */
970 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
976     for (plane = 0; plane < 3; plane++) {
977         const int stride = p->f.linesize[plane];
978         const int bw = plane ? 1 : 2; /* luma has 2x2 8x8 blocks per MB */
979         for (y = 0; y < s->mb_height * bw; y++) {
980             for (x = 0; x < s->mb_width * bw; x++) {
981                 int off = p->shared ? 0 : 16;
982                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
983                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
984                 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
                /* frame_skip_exp selects how per-block scores are combined */
986                 switch (s->avctx->frame_skip_exp) {
987                 case 0: score    = FFMAX(score, v);          break;
988                 case 1: score   += FFABS(v);                 break;
989                 case 2: score   += v * v;                    break;
990                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
991                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1000    if (score64 < s->avctx->frame_skip_threshold)
1002    if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with an auxiliary codec context and return a size-based
 * result (packet freed before returning); helper for estimate_best_b_count. */
1007 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1009     AVPacket pkt = { 0 };
1010     int ret, got_output;
1012     av_init_packet(&pkt);
1013     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1018     av_free_packet(&pkt);
/**
 * Estimate the best number of consecutive B frames by brute force:
 * encode downscaled (by brd_scale) copies of the queued input pictures with
 * a throw-away encoder instance for every candidate B-frame count j, and
 * pick the count with the lowest rate-distortion cost (bits * lambda2 plus
 * the accumulated SSE error reported by the sub-encoder).
 */
1022 static int estimate_best_b_count(MpegEncContext *s)
1024     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1025     AVCodecContext *c = avcodec_alloc_context3(NULL);
1026     AVFrame input[FF_MAX_B_FRAMES + 2];
1027     const int scale = s->avctx->brd_scale;
1028     int i, j, out_size, p_lambda, b_lambda, lambda2;
1029     int64_t best_rd  = INT64_MAX;
1030     int best_b_count = -1;
1032     assert(scale >= 0 && scale <= 3);
1035     //s->next_picture_ptr->quality;
1036     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1037     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1038     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1039     if (!b_lambda) // FIXME we should do this somewhere else
1040         b_lambda = p_lambda;
1041     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
    /* Configure the throw-away encoder to mirror the relevant settings. */
1044     c->width        = s->width  >> scale;
1045     c->height       = s->height >> scale;
1046     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1047                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1048     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1049     c->mb_decision  = s->avctx->mb_decision;
1050     c->me_cmp       = s->avctx->me_cmp;
1051     c->mb_cmp       = s->avctx->mb_cmp;
1052     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1053     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1054     c->time_base    = s->avctx->time_base;
1055     c->max_b_frames = s->max_b_frames;
1057     if (avcodec_open2(c, codec, NULL) < 0)
    /* Build downscaled copies of next_picture + queued input pictures. */
1060     for (i = 0; i < s->max_b_frames + 2; i++) {
1061         int ysize = c->width * c->height;
1062         int csize = (c->width / 2) * (c->height / 2);
1063         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1064                                                 s->next_picture_ptr;
1066         avcodec_get_frame_defaults(&input[i]);
1067         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1068         input[i].data[1]     = input[i].data[0] + ysize;
1069         input[i].data[2]     = input[i].data[1] + csize;
1070         input[i].linesize[0] = c->width;
1071         input[i].linesize[1] =
1072         input[i].linesize[2] = c->width / 2;
1074         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1075             pre_input = *pre_input_ptr;
1077             if (!pre_input.shared && i) {
1078                 pre_input.f.data[0] += INPLACE_OFFSET;
1079                 pre_input.f.data[1] += INPLACE_OFFSET;
1080                 pre_input.f.data[2] += INPLACE_OFFSET;
1083             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1084                                  pre_input.f.data[0], pre_input.f.linesize[0],
1085                                  c->width, c->height);
1086             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1087                                  pre_input.f.data[1], pre_input.f.linesize[1],
1088                                  c->width >> 1, c->height >> 1);
1089             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1090                                  pre_input.f.data[2], pre_input.f.linesize[2],
1091                                  c->width >> 1, c->height >> 1);
    /* Try each candidate B-frame run length j. */
1095     for (j = 0; j < s->max_b_frames + 1; j++) {
1098         if (!s->input_picture[j])
1101         c->error[0] = c->error[1] = c->error[2] = 0;
1103         input[0].pict_type = AV_PICTURE_TYPE_I;
1104         input[0].quality   = 1 * FF_QP2LAMBDA;
1106         out_size = encode_frame(c, &input[0]);
1108         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1110         for (i = 0; i < s->max_b_frames + 1; i++) {
            /* every (j+1)-th frame (and the last) is a P frame */
1111             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1113             input[i + 1].pict_type = is_p ?
1114                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1115             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1117             out_size = encode_frame(c, &input[i + 1]);
1119             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1122         /* get the delayed frames */
1124             out_size = encode_frame(c, NULL);
1125             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1128         rd += c->error[0] + c->error[1] + c->error[2];
1139     for (i = 0; i < s->max_b_frames + 2; i++) {
1140         av_freep(&input[i].data[0]);
1143     return best_b_count;
1146 static int select_input_picture(MpegEncContext *s)
1150 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1151 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1152 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1154 /* set next picture type & ordering */
1155 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1156 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1157 s->next_picture_ptr == NULL || s->intra_only) {
1158 s->reordered_input_picture[0] = s->input_picture[0];
1159 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1160 s->reordered_input_picture[0]->f.coded_picture_number =
1161 s->coded_picture_number++;
1165 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1166 if (s->picture_in_gop_number < s->gop_size &&
1167 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1168 // FIXME check that te gop check above is +-1 correct
1169 av_frame_unref(&s->input_picture[0]->f);
1172 ff_vbv_update(s, 0);
1178 if (s->flags & CODEC_FLAG_PASS2) {
1179 for (i = 0; i < s->max_b_frames + 1; i++) {
1180 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1182 if (pict_num >= s->rc_context.num_entries)
1184 if (!s->input_picture[i]) {
1185 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1189 s->input_picture[i]->f.pict_type =
1190 s->rc_context.entry[pict_num].new_pict_type;
1194 if (s->avctx->b_frame_strategy == 0) {
1195 b_frames = s->max_b_frames;
1196 while (b_frames && !s->input_picture[b_frames])
1198 } else if (s->avctx->b_frame_strategy == 1) {
1199 for (i = 1; i < s->max_b_frames + 1; i++) {
1200 if (s->input_picture[i] &&
1201 s->input_picture[i]->b_frame_score == 0) {
1202 s->input_picture[i]->b_frame_score =
1204 s->input_picture[i ]->f.data[0],
1205 s->input_picture[i - 1]->f.data[0],
1209 for (i = 0; i < s->max_b_frames + 1; i++) {
1210 if (s->input_picture[i] == NULL ||
1211 s->input_picture[i]->b_frame_score - 1 >
1212 s->mb_num / s->avctx->b_sensitivity)
1216 b_frames = FFMAX(0, i - 1);
1219 for (i = 0; i < b_frames + 1; i++) {
1220 s->input_picture[i]->b_frame_score = 0;
1222 } else if (s->avctx->b_frame_strategy == 2) {
1223 b_frames = estimate_best_b_count(s);
1225 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1231 for (i = b_frames - 1; i >= 0; i--) {
1232 int type = s->input_picture[i]->f.pict_type;
1233 if (type && type != AV_PICTURE_TYPE_B)
1236 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1237 b_frames == s->max_b_frames) {
1238 av_log(s->avctx, AV_LOG_ERROR,
1239 "warning, too many b frames in a row\n");
1242 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1243 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1244 s->gop_size > s->picture_in_gop_number) {
1245 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1247 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1249 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1253 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1254 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1257 s->reordered_input_picture[0] = s->input_picture[b_frames];
1258 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1259 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1260 s->reordered_input_picture[0]->f.coded_picture_number =
1261 s->coded_picture_number++;
1262 for (i = 0; i < b_frames; i++) {
1263 s->reordered_input_picture[i + 1] = s->input_picture[i];
1264 s->reordered_input_picture[i + 1]->f.pict_type =
1266 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1267 s->coded_picture_number++;
1272 if (s->reordered_input_picture[0]) {
1273 s->reordered_input_picture[0]->reference =
1274 s->reordered_input_picture[0]->f.pict_type !=
1275 AV_PICTURE_TYPE_B ? 3 : 0;
1277 ff_mpeg_unref_picture(s, &s->new_picture);
1278 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1281 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1282 // input is a shared pix, so we can't modifiy it -> alloc a new
1283 // one & ensure that the shared one is reuseable
1286 int i = ff_find_unused_picture(s, 0);
1289 pic = &s->picture[i];
1291 pic->reference = s->reordered_input_picture[0]->reference;
1292 if (ff_alloc_picture(s, pic, 0) < 0) {
1296 copy_picture_attributes(s, &pic->f,
1297 &s->reordered_input_picture[0]->f);
1299 /* mark us unused / free shared pic */
1300 av_frame_unref(&s->reordered_input_picture[0]->f);
1301 s->reordered_input_picture[0]->shared = 0;
1303 s->current_picture_ptr = pic;
1305 // input is not a shared pix -> reuse buffer for current_pix
1306 s->current_picture_ptr = s->reordered_input_picture[0];
1307 for (i = 0; i < 4; i++) {
1308 s->new_picture.f.data[i] += INPLACE_OFFSET;
1311 ff_mpeg_unref_picture(s, &s->current_picture);
1312 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1313 s->current_picture_ptr)) < 0)
1316 s->picture_number = s->new_picture.f.display_picture_number;
1318 ff_mpeg_unref_picture(s, &s->new_picture);
/**
 * Main encoder entry point: encode one input frame into a packet.
 *
 * Loads and reorders the input picture, splits the output buffer between
 * the slice-thread bit writers, encodes the picture, then applies VBV
 * rate control: if the frame is too large for the buffer it raises lambda
 * and undoes per-frame state so the frame can be encoded again
 * (NOTE(review): the retry jump itself is not visible in this excerpt —
 * several lines are missing from this copy). Finally writes codec-specific
 * stuffing bits, patches vbv_delay for CBR MPEG-1/2, and fills pkt
 * pts/dts/flags and side data.
 */
1323 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1324 const AVFrame *pic_arg, int *got_packet)
1326 MpegEncContext *s = avctx->priv_data;
1327 int i, stuffing_count, ret;
1328 int context_count = s->slice_context_count;
1330 s->picture_in_gop_number++;
1332 if (load_input_picture(s, pic_arg) < 0)
1335 if (select_input_picture(s) < 0) {
/* output? — only emit a packet when a reordered picture is available */
1340 if (s->new_picture.f.data[0]) {
1342 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* 12 bytes of H.263 MB info per macroblock, back-filled by write_mb_info() */
1345 s->mb_info_ptr = av_packet_new_side_data(pkt,
1346 AV_PKT_DATA_H263_MB_INFO,
1347 s->mb_width*s->mb_height*12);
1348 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* carve the packet buffer into per-slice-thread regions, proportional
 * to each thread's share of macroblock rows */
1351 for (i = 0; i < context_count; i++) {
1352 int start_y = s->thread_context[i]->start_mb_y;
1353 int end_y = s->thread_context[i]-> end_mb_y;
1354 int h = s->mb_height;
1355 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1356 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1358 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1361 s->pict_type = s->new_picture.f.pict_type;
1363 ff_MPV_frame_start(s, avctx);
1365 if (encode_picture(s, s->picture_number) < 0)
/* export per-category bit statistics to the AVCodecContext */
1368 avctx->header_bits = s->header_bits;
1369 avctx->mv_bits = s->mv_bits;
1370 avctx->misc_bits = s->misc_bits;
1371 avctx->i_tex_bits = s->i_tex_bits;
1372 avctx->p_tex_bits = s->p_tex_bits;
1373 avctx->i_count = s->i_count;
1374 // FIXME f/b_count in avctx
1375 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1376 avctx->skip_count = s->skip_count;
1378 ff_MPV_frame_end(s);
1380 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1381 ff_mjpeg_encode_picture_trailer(s);
/* VBV check: if the encoded frame overruns the allowed buffer share,
 * raise lambda and roll back per-frame state for a re-encode */
1383 if (avctx->rc_buffer_size) {
1384 RateControlContext *rcc = &s->rc_context;
1385 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1387 if (put_bits_count(&s->pb) > max_size &&
1388 s->lambda < s->avctx->lmax) {
1389 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1390 (s->qscale + 1) / s->qscale);
1391 if (s->adaptive_quant) {
1393 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1394 s->lambda_table[i] =
1395 FFMAX(s->lambda_table[i] + 1,
1396 s->lambda_table[i] * (s->qscale + 1) /
1399 s->mb_skipped = 0; // done in MPV_frame_start()
1400 // done in encode_picture() so we must undo it
1401 if (s->pict_type == AV_PICTURE_TYPE_P) {
1402 if (s->flipflop_rounding ||
1403 s->codec_id == AV_CODEC_ID_H263P ||
1404 s->codec_id == AV_CODEC_ID_MPEG4)
1405 s->no_rounding ^= 1;
1407 if (s->pict_type != AV_PICTURE_TYPE_B) {
1408 s->time_base = s->last_time_base;
1409 s->last_non_b_time = s->time - s->pp_time;
/* rewind each slice thread's bit writer for the retry */
1411 for (i = 0; i < context_count; i++) {
1412 PutBitContext *pb = &s->thread_context[i]->pb;
1413 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1418 assert(s->avctx->rc_max_rate);
1421 if (s->flags & CODEC_FLAG_PASS1)
1422 ff_write_pass1_stats(s);
1424 for (i = 0; i < 4; i++) {
1425 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1426 avctx->error[i] += s->current_picture_ptr->f.error[i];
/* in pass-1 the per-category counters must sum to the total bit count */
1429 if (s->flags & CODEC_FLAG_PASS1)
1430 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1431 avctx->i_tex_bits + avctx->p_tex_bits ==
1432 put_bits_count(&s->pb));
1433 flush_put_bits(&s->pb);
1434 s->frame_bits = put_bits_count(&s->pb);
/* ff_vbv_update() returns how many stuffing bytes are needed to keep the
 * decoder buffer from overflowing */
1436 stuffing_count = ff_vbv_update(s, s->frame_bits);
1437 if (stuffing_count) {
1438 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1439 stuffing_count + 50) {
1440 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1444 switch (s->codec_id) {
1445 case AV_CODEC_ID_MPEG1VIDEO:
1446 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero bytes are legal stuffing */
1447 while (stuffing_count--) {
1448 put_bits(&s->pb, 8, 0);
1451 case AV_CODEC_ID_MPEG4:
/* MPEG-4: emit a stuffing start code (0x000001C3) then 0xFF filler */
1452 put_bits(&s->pb, 16, 0);
1453 put_bits(&s->pb, 16, 0x1C3);
1454 stuffing_count -= 4;
1455 while (stuffing_count--) {
1456 put_bits(&s->pb, 8, 0xFF);
1460 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1462 flush_put_bits(&s->pb);
1463 s->frame_bits = put_bits_count(&s->pb);
1466 /* update mpeg1/2 vbv_delay for CBR */
1467 if (s->avctx->rc_max_rate &&
1468 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1469 s->out_format == FMT_MPEG1 &&
1470 90000LL * (avctx->rc_buffer_size - 1) <=
1471 s->avctx->rc_max_rate * 0xFFFFLL) {
1472 int vbv_delay, min_delay;
1473 double inbits = s->avctx->rc_max_rate *
1474 av_q2d(s->avctx->time_base);
1475 int minbits = s->frame_bits - 8 *
1476 (s->vbv_delay_ptr - s->pb.buf - 1);
1477 double bits = s->rc_context.buffer_index + minbits - inbits;
1480 av_log(s->avctx, AV_LOG_ERROR,
1481 "Internal error, negative bits\n");
1483 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1485 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1486 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1487 s->avctx->rc_max_rate;
1489 vbv_delay = FFMAX(vbv_delay, min_delay);
1491 assert(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field straddling three bytes of the
 * already-written picture header */
1493 s->vbv_delay_ptr[0] &= 0xF8;
1494 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1495 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1496 s->vbv_delay_ptr[2] &= 0x07;
1497 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1498 avctx->vbv_delay = vbv_delay * 300;
1500 s->total_bits += s->frame_bits;
1501 avctx->frame_bits = s->frame_bits;
/* timestamps: with B-frames (low_delay off) dts lags pts by one frame */
1503 pkt->pts = s->current_picture.f.pts;
1504 if (!s->low_delay) {
1505 if (!s->current_picture.f.coded_picture_number)
1506 pkt->dts = pkt->pts - s->dts_delta;
1508 pkt->dts = s->reordered_pts;
1509 s->reordered_pts = s->input_picture[0]->f.pts;
1511 pkt->dts = pkt->pts;
1512 if (s->current_picture.f.key_frame)
1513 pkt->flags |= AV_PKT_FLAG_KEY;
1515 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1519 assert((s->frame_bits & 7) == 0);
1521 pkt->size = s->frame_bits / 8;
1522 *got_packet = !!pkt->size;
/**
 * Drop a nearly-empty quantized block: if the weighted "significance score"
 * of block n's coefficients stays below threshold, zero the block (or keep
 * only DC) by clearing it and shrinking block_last_index.
 *
 * A negative threshold means "keep the DC coefficient" (skip_dc mode —
 * NOTE(review): the line setting skip_dc is missing from this copy).
 */
1526 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1527 int n, int threshold)
/* per-scan-position weight: low-frequency coefficients count more */
1529 static const char tab[64] = {
1530 3, 2, 2, 1, 1, 1, 1, 1,
1531 1, 1, 1, 1, 1, 1, 1, 1,
1532 1, 1, 1, 1, 1, 1, 1, 1,
1533 0, 0, 0, 0, 0, 0, 0, 0,
1534 0, 0, 0, 0, 0, 0, 0, 0,
1535 0, 0, 0, 0, 0, 0, 0, 0,
1536 0, 0, 0, 0, 0, 0, 0, 0,
1537 0, 0, 0, 0, 0, 0, 0, 0
1542 int16_t *block = s->block[n];
1543 const int last_index = s->block_last_index[n];
1546 if (threshold < 0) {
1548 threshold = -threshold;
1552 /* Are all we could set to zero already zero? */
1553 if (last_index <= skip_dc - 1)
/* accumulate the score over the scan order; any coefficient with
 * magnitude > 1 keeps the block (see the level > 1 branch) */
1556 for (i = 0; i <= last_index; i++) {
1557 const int j = s->intra_scantable.permutated[i];
1558 const int level = FFABS(block[j]);
1560 if (skip_dc && i == 0)
1564 } else if (level > 1) {
1570 if (score >= threshold)
/* below threshold: clear all (non-DC) coefficients */
1572 for (i = skip_dc; i <= last_index; i++) {
1573 const int j = s->intra_scantable.permutated[i];
/* keep only DC (index 0) ... */
1577 s->block_last_index[n] = 0;
/* ... or mark the block completely empty */
1579 s->block_last_index[n] = -1;
/**
 * Clamp quantized coefficients of one block into the codec's legal range
 * [s->min_qcoeff, s->max_qcoeff], counting how many were out of range.
 * NOTE(review): the actual clamp assignments are missing from this copy;
 * only the range tests are visible.
 */
1582 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1586 const int maxlevel = s->max_qcoeff;
1587 const int minlevel = s->min_qcoeff;
1591 i = 1; // skip clipping of intra dc
/* walk the coefficients in scan order up to block_last_index */
1595 for (; i <= last_index; i++) {
1596 const int j = s->intra_scantable.permutated[i];
1597 int level = block[j];
1599 if (level > maxlevel) {
1602 } else if (level < minlevel) {
/* clipping is harmless for RD mode decision; only warn in SIMPLE mode */
1610 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1611 av_log(s->avctx, AV_LOG_INFO,
1612 "warning, clipping %d dct coefficients to %d..%d\n",
1613 overflow, minlevel, maxlevel);
/**
 * Compute a perceptual weight for each pixel of an 8x8 block from the
 * local activity: for every pixel, sum/sum-of-squares statistics over the
 * edge-clipped 3x3 neighbourhood feed 36*sqrt(count*sqr - sum^2)/count,
 * i.e. a scaled local standard deviation. Used by the noise-shaping
 * quantizer (dct_quantize_refine).
 * NOTE(review): the accumulator declarations/updates are missing from this
 * copy; only the loop structure and final formula are visible.
 */
1616 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1620 for (y = 0; y < 8; y++) {
1621 for (x = 0; x < 8; x++) {
/* 3x3 window clipped to the block borders */
1627 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1628 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1629 int v = ptr[x2 + y2 * stride];
1635 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/**
 * Encode one macroblock: adaptive-quant handling, pixel fetch (intra) or
 * motion compensation + residual (inter), optional interlaced-DCT decision,
 * forward DCT + quantization with optional noise shaping and coefficient
 * elimination, then codec-specific entropy coding.
 *
 * mb_block_height/mb_block_count parameterize 4:2:0 (8, 6) vs 4:2:2 (16, 8);
 * see encode_mb(). Always inlined so each call site is specialized.
 */
1640 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1641 int motion_x, int motion_y,
1642 int mb_block_height,
1645 int16_t weight[8][64];
1646 int16_t orig[8][64];
1647 const int mb_x = s->mb_x;
1648 const int mb_y = s->mb_y;
1651 int dct_offset = s->linesize * 8; // default for progressive frames
1652 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1655 for (i = 0; i < mb_block_count; i++)
1656 skip_dct[i] = s->skipdct;
/* per-MB quantizer update from the rate-control lambda table */
1658 if (s->adaptive_quant) {
1659 const int last_qp = s->qscale;
1660 const int mb_xy = mb_x + mb_y * s->mb_stride;
1662 s->lambda = s->lambda_table[mb_xy];
1665 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1666 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1667 s->dquant = s->qscale - last_qp;
/* H.263-family syntax only allows dquant in [-2, 2], and MPEG-4 has
 * further restrictions (B direct mode, 8x8 MVs) */
1669 if (s->out_format == FMT_H263) {
1670 s->dquant = av_clip(s->dquant, -2, 2);
1672 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1674 if (s->pict_type == AV_PICTURE_TYPE_B) {
1675 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1678 if (s->mv_type == MV_TYPE_8X8)
1684 ff_set_qscale(s, last_qp + s->dquant);
1685 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1686 ff_set_qscale(s, s->qscale + s->dquant);
/* source pointers into the picture being encoded */
1688 wrap_y = s->linesize;
1689 wrap_c = s->uvlinesize;
1690 ptr_y = s->new_picture.f.data[0] +
1691 (mb_y * 16 * wrap_y) + mb_x * 16;
1692 ptr_cb = s->new_picture.f.data[1] +
1693 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1694 ptr_cr = s->new_picture.f.data[2] +
1695 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB sticks out of the picture: replicate edges into a scratch buffer */
1697 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1698 uint8_t *ebuf = s->edge_emu_buffer + 32;
1699 s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1700 mb_y * 16, s->width, s->height);
1702 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1703 mb_block_height, mb_x * 8, mb_y * 8,
1704 s->width >> 1, s->height >> 1);
1705 ptr_cb = ebuf + 18 * wrap_y;
1706 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1707 mb_block_height, mb_x * 8, mb_y * 8,
1708 s->width >> 1, s->height >> 1);
1709 ptr_cr = ebuf + 18 * wrap_y + 8;
/* intra path: decide frame vs field DCT by comparing ildct_cmp scores
 * (the -400 bias favors progressive) */
1713 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1714 int progressive_score, interlaced_score;
1716 s->interlaced_dct = 0;
1717 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1719 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1720 NULL, wrap_y, 8) - 400;
1722 if (progressive_score > 0) {
1723 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1724 NULL, wrap_y * 2, 8) +
1725 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1726 NULL, wrap_y * 2, 8);
1727 if (progressive_score > interlaced_score) {
1728 s->interlaced_dct = 1;
/* field DCT: lower luma blocks start one line down, not eight */
1730 dct_offset = wrap_y;
1732 if (s->chroma_format == CHROMA_422)
/* load the four 8x8 luma blocks */
1738 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1739 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1740 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1741 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1743 if (s->flags & CODEC_FLAG_GRAY) {
1747 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1748 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1749 if (!s->chroma_y_shift) { /* 422 */
1750 s->dsp.get_pixels(s->block[6],
1751 ptr_cb + (dct_offset >> 1), wrap_c);
1752 s->dsp.get_pixels(s->block[7],
1753 ptr_cr + (dct_offset >> 1), wrap_c);
/* inter path: run motion compensation into s->dest, then DCT the residual */
1757 op_pixels_func (*op_pix)[4];
1758 qpel_mc_func (*op_qpix)[16];
1759 uint8_t *dest_y, *dest_cb, *dest_cr;
1761 dest_y = s->dest[0];
1762 dest_cb = s->dest[1];
1763 dest_cr = s->dest[2];
1765 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1766 op_pix = s->hdsp.put_pixels_tab;
1767 op_qpix = s->dsp.put_qpel_pixels_tab;
1769 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1770 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1773 if (s->mv_dir & MV_DIR_FORWARD) {
1774 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1775 s->last_picture.f.data,
/* a backward prediction after a forward one must average (B-frames) */
1777 op_pix = s->hdsp.avg_pixels_tab;
1778 op_qpix = s->dsp.avg_qpel_pixels_tab;
1780 if (s->mv_dir & MV_DIR_BACKWARD) {
1781 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1782 s->next_picture.f.data,
/* inter frame/field DCT decision on the prediction error */
1786 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1787 int progressive_score, interlaced_score;
1789 s->interlaced_dct = 0;
1790 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1793 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1794 ptr_y + wrap_y * 8, wrap_y,
1797 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1798 progressive_score -= 400;
1800 if (progressive_score > 0) {
1801 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1804 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1808 if (progressive_score > interlaced_score) {
1809 s->interlaced_dct = 1;
1811 dct_offset = wrap_y;
1813 if (s->chroma_format == CHROMA_422)
/* residual = source - prediction, per 8x8 block */
1819 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1820 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1821 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1822 dest_y + dct_offset, wrap_y);
1823 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1824 dest_y + dct_offset + 8, wrap_y);
1826 if (s->flags & CODEC_FLAG_GRAY) {
1830 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1831 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1832 if (!s->chroma_y_shift) { /* 422 */
1833 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1834 dest_cb + (dct_offset >> 1), wrap_c);
1835 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1836 dest_cr + (dct_offset >> 1), wrap_c);
1839 /* pre quantization */
/* for low-variance MBs, skip the DCT of any 8x8 block whose SAD against
 * the prediction is below 20*qscale — it would quantize to zero anyway */
1840 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1841 2 * s->qscale * s->qscale) {
1843 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1844 wrap_y, 8) < 20 * s->qscale)
1846 if (s->dsp.sad[1](NULL, ptr_y + 8,
1847 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1849 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1850 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1852 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1853 dest_y + dct_offset + 8,
1854 wrap_y, 8) < 20 * s->qscale)
1856 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1857 wrap_c, 8) < 20 * s->qscale)
1859 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1860 wrap_c, 8) < 20 * s->qscale)
1862 if (!s->chroma_y_shift) { /* 422 */
1863 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1864 dest_cb + (dct_offset >> 1),
1865 wrap_c, 8) < 20 * s->qscale)
1867 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1868 dest_cr + (dct_offset >> 1),
1869 wrap_c, 8) < 20 * s->qscale)
/* noise shaping: keep visual weights and original coefficients around
 * so dct_quantize_refine() can redistribute quantization error */
1875 if (s->quantizer_noise_shaping) {
1877 get_visual_weight(weight[0], ptr_y , wrap_y);
1879 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1881 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1883 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1885 get_visual_weight(weight[4], ptr_cb , wrap_c);
1887 get_visual_weight(weight[5], ptr_cr , wrap_c);
1888 if (!s->chroma_y_shift) { /* 422 */
1890 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1893 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1896 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1899 /* DCT & quantize */
1900 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1902 for (i = 0; i < mb_block_count; i++) {
1905 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1906 // FIXME we could decide to change to quantizer instead of
1908 // JS: I don't think that would be a good idea it could lower
1909 // quality instead of improve it. Just INTRADC clipping
1910 // deserves changes in quantizer
1912 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1914 s->block_last_index[i] = -1;
1916 if (s->quantizer_noise_shaping) {
1917 for (i = 0; i < mb_block_count; i++) {
1919 s->block_last_index[i] =
1920 dct_quantize_refine(s, s->block[i], weight[i],
1921 orig[i], i, s->qscale);
/* drop almost-empty inter blocks below the elimination thresholds */
1926 if (s->luma_elim_threshold && !s->mb_intra)
1927 for (i = 0; i < 4; i++)
1928 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1929 if (s->chroma_elim_threshold && !s->mb_intra)
1930 for (i = 4; i < mb_block_count; i++)
1931 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1933 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1934 for (i = 0; i < mb_block_count; i++) {
1935 if (s->block_last_index[i] == -1)
1936 s->coded_score[i] = INT_MAX / 256;
/* gray-only encoding: force neutral chroma DC for intra MBs */
1941 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1942 s->block_last_index[4] =
1943 s->block_last_index[5] = 0;
1945 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1948 // non c quantize code returns incorrect block_last_index FIXME
1949 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1950 for (i = 0; i < mb_block_count; i++) {
1952 if (s->block_last_index[i] > 0) {
/* rescan from the end to find the true last nonzero coefficient */
1953 for (j = 63; j > 0; j--) {
1954 if (s->block[i][s->intra_scantable.permutated[j]])
1957 s->block_last_index[i] = j;
1962 /* huffman encode */
1963 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1964 case AV_CODEC_ID_MPEG1VIDEO:
1965 case AV_CODEC_ID_MPEG2VIDEO:
1966 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1967 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1969 case AV_CODEC_ID_MPEG4:
1970 if (CONFIG_MPEG4_ENCODER)
1971 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1973 case AV_CODEC_ID_MSMPEG4V2:
1974 case AV_CODEC_ID_MSMPEG4V3:
1975 case AV_CODEC_ID_WMV1:
1976 if (CONFIG_MSMPEG4_ENCODER)
1977 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1979 case AV_CODEC_ID_WMV2:
1980 if (CONFIG_WMV2_ENCODER)
1981 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1983 case AV_CODEC_ID_H261:
1984 if (CONFIG_H261_ENCODER)
1985 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1987 case AV_CODEC_ID_H263:
1988 case AV_CODEC_ID_H263P:
1989 case AV_CODEC_ID_FLV1:
1990 case AV_CODEC_ID_RV10:
1991 case AV_CODEC_ID_RV20:
1992 if (CONFIG_H263_ENCODER)
1993 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
1995 case AV_CODEC_ID_MJPEG:
1996 if (CONFIG_MJPEG_ENCODER)
1997 ff_mjpeg_encode_mb(s, s->block);
2004 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2006 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2007 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/**
 * Snapshot the encoder state that encoding a macroblock mutates
 * (MV predictors, skip run, DC predictors, bit-category counters,
 * qscale/dquant, escape-code state) from s into d, so a trial encode
 * can later be rolled back with copy_context_after_encode().
 */
2010 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2013 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2016 d->mb_skip_run= s->mb_skip_run;
2018 d->last_dc[i] = s->last_dc[i];
/* per-category bit statistics */
2021 d->mv_bits= s->mv_bits;
2022 d->i_tex_bits= s->i_tex_bits;
2023 d->p_tex_bits= s->p_tex_bits;
2024 d->i_count= s->i_count;
2025 d->f_count= s->f_count;
2026 d->b_count= s->b_count;
2027 d->skip_count= s->skip_count;
2028 d->misc_bits= s->misc_bits;
2032 d->qscale= s->qscale;
2033 d->dquant= s->dquant;
2035 d->esc3_level_length= s->esc3_level_length;
/**
 * Counterpart of copy_context_before_encode(): copy the post-encode state
 * of s into d. Copies everything the "before" version does plus the
 * encode results (MVs, mb_intra/mb_skipped, mv_type/mv_dir, partitioned
 * bit writers, block_last_index, interlaced_dct), so the winning candidate
 * of encode_mb_hq() can be committed.
 */
2038 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2041 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2042 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2045 d->mb_skip_run= s->mb_skip_run;
2047 d->last_dc[i] = s->last_dc[i];
/* per-category bit statistics */
2050 d->mv_bits= s->mv_bits;
2051 d->i_tex_bits= s->i_tex_bits;
2052 d->p_tex_bits= s->p_tex_bits;
2053 d->i_count= s->i_count;
2054 d->f_count= s->f_count;
2055 d->b_count= s->b_count;
2056 d->skip_count= s->skip_count;
2057 d->misc_bits= s->misc_bits;
/* mode-decision results */
2059 d->mb_intra= s->mb_intra;
2060 d->mb_skipped= s->mb_skipped;
2061 d->mv_type= s->mv_type;
2062 d->mv_dir= s->mv_dir;
2064 if(s->data_partitioning){
2066 d->tex_pb= s->tex_pb;
2070 d->block_last_index[i]= s->block_last_index[i];
2071 d->interlaced_dct= s->interlaced_dct;
2072 d->qscale= s->qscale;
2074 d->esc3_level_length= s->esc3_level_length;
/**
 * Trial-encode one candidate macroblock mode for the high-quality mode
 * decision: restore the pre-MB context from backup, encode into the
 * double-buffered scratch bitstreams selected by *next_block, score the
 * result (bit count, or full rate-distortion when mb_decision is RD),
 * and if it beats *dmin, record the winner in best.
 * NOTE(review): the winner-update branch is missing from this copy; only
 * the final copy_context_after_encode() is visible.
 */
2077 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2078 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2079 int *dmin, int *next_block, int motion_x, int motion_y)
2082 uint8_t *dest_backup[3];
2084 copy_context_before_encode(s, backup, type);
/* route the encode into the scratch block/bitstream pair */
2086 s->block= s->blocks[*next_block];
2087 s->pb= pb[*next_block];
2088 if(s->data_partitioning){
2089 s->pb2 = pb2 [*next_block];
2090 s->tex_pb= tex_pb[*next_block];
/* reconstruct into the RD scratchpad instead of the real picture */
2094 memcpy(dest_backup, s->dest, sizeof(s->dest));
2095 s->dest[0] = s->rd_scratchpad;
2096 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2097 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2098 assert(s->linesize >= 32); //FIXME
2101 encode_mb(s, motion_x, motion_y);
/* base score: bits produced across all active bit writers */
2103 score= put_bits_count(&s->pb);
2104 if(s->data_partitioning){
2105 score+= put_bits_count(&s->pb2);
2106 score+= put_bits_count(&s->tex_pb);
/* full RD: decode the MB and add lambda-weighted distortion */
2109 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2110 ff_MPV_decode_mb(s, s->block);
2112 score *= s->lambda2;
2113 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2117 memcpy(s->dest, dest_backup, sizeof(s->dest));
2124 copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h regions. Uses the optimized
 * DSP routines for full 16x16 and 8x8 sizes; arbitrary (edge) sizes fall
 * back to a scalar loop using the centered square table ff_squareTbl.
 * NOTE(review): the scalar loop header and accumulator declaration are
 * missing from this copy.
 */
2128 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* +256 so negative differences index into the table correctly */
2129 uint32_t *sq = ff_squareTbl + 256;
2134 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2135 else if(w==8 && h==8)
2136 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2140 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: SSE (or NSSE when mb_cmp selects
 * it) between the source picture and the reconstruction in s->dest, over
 * luma plus both chroma planes. Edge MBs that stick out of the picture
 * use the clipped-size scalar sse() path instead of the fixed-size DSP
 * routines.
 */
2149 static int sse_mb(MpegEncContext *s){
/* clip the compared area to the picture for the right/bottom edge MBs */
2153 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2154 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2157 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2158 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2159 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2160 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2162 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2163 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2164 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* partial (edge) MB: per-pixel SSE on the clipped region */
2167 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2168 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2169 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker for the motion-estimation pre-pass: iterates the
 * thread's macroblock rows in reverse raster order using the pre-pass
 * diamond size, calling ff_pre_estimate_p_frame_motion() per MB.
 * arg is a MpegEncContext* (passed as void** by the thread framework).
 */
2172 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2173 MpegEncContext *s= *(void**)arg;
2177 s->me.dia_size= s->avctx->pre_dia_size;
2178 s->first_slice_line=1;
/* bottom-to-top, right-to-left: the pre-pass scans opposite to encoding */
2179 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2180 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2181 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2183 s->first_slice_line=0;
/**
 * Slice-thread worker for the main motion-estimation pass: walks the
 * thread's macroblocks in raster order, maintaining block_index, and runs
 * B- or P-frame motion estimation depending on the picture type. Results
 * (motion vectors, mb_type) are stored in the context for encode_thread().
 */
2191 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2192 MpegEncContext *s= *(void**)arg;
2194 ff_check_alignment();
2196 s->me.dia_size= s->avctx->dia_size;
2197 s->first_slice_line=1;
2198 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2199 s->mb_x=0; //for block init below
2200 ff_init_block_index(s);
2201 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the per-block indices by one MB (2 blocks horizontally) */
2202 s->block_index[0]+=2;
2203 s->block_index[1]+=2;
2204 s->block_index[2]+=2;
2205 s->block_index[3]+=2;
2207 /* compute motion vector & mb_type and store in context */
2208 if(s->pict_type==AV_PICTURE_TYPE_B)
2209 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2211 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2213 s->first_slice_line=0;
/**
 * Slice-thread worker computing per-macroblock luma statistics from the
 * source picture: spatial variance into mb_var, mean into mb_mean, and
 * the running variance total into me.mb_var_sum_temp. Used by rate
 * control / adaptive quantization.
 */
2218 static int mb_var_thread(AVCodecContext *c, void *arg){
2219 MpegEncContext *s= *(void**)arg;
2222 ff_check_alignment();
2224 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2225 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2228 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2230 int sum = s->dsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2 over the 256 luma pixels, rounded */
2232 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2234 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2235 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2236 s->me.mb_var_sum_temp += varc;
/**
 * Finalize the current slice's bitstream: merge MPEG-4 data partitions
 * and write MPEG-4/MJPEG stuffing as needed, then byte-align and flush
 * the bit writer. Updates misc_bits for pass-1 statistics.
 */
2242 static void write_slice_end(MpegEncContext *s){
2243 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2244 if(s->partitioned_frame){
2245 ff_mpeg4_merge_partitions(s);
2248 ff_mpeg4_stuffing(&s->pb);
2249 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2250 ff_mjpeg_encode_stuffing(&s->pb);
2253 avpriv_align_put_bits(&s->pb);
2254 flush_put_bits(&s->pb);
/* alignment/stuffing bits count as misc in the pass-1 log */
2256 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2257 s->misc_bits+= get_bits_diff(s);
/**
 * Append one 12-byte H.263 macroblock-info record to the
 * AV_PKT_DATA_H263_MB_INFO side data: bit offset of the MB, quantizer,
 * GOB number, MB address within the GOB, and the first MV predictor pair
 * (the second pair is always written as 0 — 4MV not implemented).
 */
2260 static void write_mb_info(MpegEncContext *s)
/* mb_info_size was already advanced by update_mb_info(); back-fill the slot */
2262 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2263 int offset = put_bits_count(&s->pb);
2264 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2265 int gobn = s->mb_y / s->gob_index;
2267 if (CONFIG_H263_ENCODER)
2268 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2269 bytestream_put_le32(&ptr, offset);
2270 bytestream_put_byte(&ptr, s->qscale);
2271 bytestream_put_byte(&ptr, gobn);
2272 bytestream_put_le16(&ptr, mba);
2273 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2274 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2275 /* 4MV not implemented */
2276 bytestream_put_byte(&ptr, 0); /* hmv2 */
2277 bytestream_put_byte(&ptr, 0); /* vmv2 */
/**
 * Decide whether a new MB-info record starts at the current bitstream
 * position. Called once per MB (startcode=0): opens a new 12-byte slot
 * whenever at least mb_info bytes were written since the previous record.
 * Called again right after a start code (startcode=1) to re-anchor the
 * pending record at the first byte following the start code.
 */
2280 static void update_mb_info(MpegEncContext *s, int startcode)
2284 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2285 s->mb_info_size += 12;
2286 s->prev_mb_info = s->last_mb_info;
2289 s->prev_mb_info = put_bits_count(&s->pb)/8;
2290 /* This might have incremented mb_info_size above, and we return without
2291 * actually writing any info into that slot yet. But in that case,
2292 * this will be called again at the start of the after writing the
2293 * start code, actually writing the mb info. */
/* remember where the next record would start; ensure a first slot exists */
2297 s->last_mb_info = put_bits_count(&s->pb)/8;
2298 if (!s->mb_info_size)
2299 s->mb_info_size += 12;
2303 static int encode_thread(AVCodecContext *c, void *arg){
2304 MpegEncContext *s= *(void**)arg;
2305 int mb_x, mb_y, pdif = 0;
2306 int chr_h= 16>>s->chroma_y_shift;
2308 MpegEncContext best_s, backup_s;
2309 uint8_t bit_buf[2][MAX_MB_BYTES];
2310 uint8_t bit_buf2[2][MAX_MB_BYTES];
2311 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2312 PutBitContext pb[2], pb2[2], tex_pb[2];
2314 ff_check_alignment();
2317 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2318 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2319 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2322 s->last_bits= put_bits_count(&s->pb);
2333 /* init last dc values */
2334 /* note: quant matrix value (8) is implied here */
2335 s->last_dc[i] = 128 << s->intra_dc_precision;
2337 s->current_picture.f.error[i] = 0;
2340 memset(s->last_mv, 0, sizeof(s->last_mv));
2344 switch(s->codec_id){
2345 case AV_CODEC_ID_H263:
2346 case AV_CODEC_ID_H263P:
2347 case AV_CODEC_ID_FLV1:
2348 if (CONFIG_H263_ENCODER)
2349 s->gob_index = ff_h263_get_gob_height(s);
2351 case AV_CODEC_ID_MPEG4:
2352 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2353 ff_mpeg4_init_partitions(s);
2359 s->first_slice_line = 1;
2360 s->ptr_lastgob = s->pb.buf;
2361 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2365 ff_set_qscale(s, s->qscale);
2366 ff_init_block_index(s);
2368 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2369 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2370 int mb_type= s->mb_type[xy];
2375 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2376 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2379 if(s->data_partitioning){
2380 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2381 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2382 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2388 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2389 ff_update_block_index(s);
2391 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2392 ff_h261_reorder_mb_index(s);
2393 xy= s->mb_y*s->mb_stride + s->mb_x;
2394 mb_type= s->mb_type[xy];
2397 /* write gob / video packet header */
2399 int current_packet_size, is_gob_start;
2401 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2403 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2405 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2407 switch(s->codec_id){
2408 case AV_CODEC_ID_H263:
2409 case AV_CODEC_ID_H263P:
2410 if(!s->h263_slice_structured)
2411 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2413 case AV_CODEC_ID_MPEG2VIDEO:
2414 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2415 case AV_CODEC_ID_MPEG1VIDEO:
2416 if(s->mb_skip_run) is_gob_start=0;
2421 if(s->start_mb_y != mb_y || mb_x!=0){
2424 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2425 ff_mpeg4_init_partitions(s);
2429 assert((put_bits_count(&s->pb)&7) == 0);
2430 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2432 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2433 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2434 int d= 100 / s->avctx->error_rate;
2436 current_packet_size=0;
2437 s->pb.buf_ptr= s->ptr_lastgob;
2438 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2442 if (s->avctx->rtp_callback){
2443 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2444 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2446 update_mb_info(s, 1);
2448 switch(s->codec_id){
2449 case AV_CODEC_ID_MPEG4:
2450 if (CONFIG_MPEG4_ENCODER) {
2451 ff_mpeg4_encode_video_packet_header(s);
2452 ff_mpeg4_clean_buffers(s);
2455 case AV_CODEC_ID_MPEG1VIDEO:
2456 case AV_CODEC_ID_MPEG2VIDEO:
2457 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2458 ff_mpeg1_encode_slice_header(s);
2459 ff_mpeg1_clean_buffers(s);
2462 case AV_CODEC_ID_H263:
2463 case AV_CODEC_ID_H263P:
2464 if (CONFIG_H263_ENCODER)
2465 ff_h263_encode_gob_header(s, mb_y);
2469 if(s->flags&CODEC_FLAG_PASS1){
2470 int bits= put_bits_count(&s->pb);
2471 s->misc_bits+= bits - s->last_bits;
2475 s->ptr_lastgob += current_packet_size;
2476 s->first_slice_line=1;
2477 s->resync_mb_x=mb_x;
2478 s->resync_mb_y=mb_y;
2482 if( (s->resync_mb_x == s->mb_x)
2483 && s->resync_mb_y+1 == s->mb_y){
2484 s->first_slice_line=0;
2488 s->dquant=0; //only for QP_RD
2490 update_mb_info(s, 0);
2492 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2494 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2496 copy_context_before_encode(&backup_s, s, -1);
2498 best_s.data_partitioning= s->data_partitioning;
2499 best_s.partitioned_frame= s->partitioned_frame;
2500 if(s->data_partitioning){
2501 backup_s.pb2= s->pb2;
2502 backup_s.tex_pb= s->tex_pb;
2505 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2506 s->mv_dir = MV_DIR_FORWARD;
2507 s->mv_type = MV_TYPE_16X16;
2509 s->mv[0][0][0] = s->p_mv_table[xy][0];
2510 s->mv[0][0][1] = s->p_mv_table[xy][1];
2511 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2512 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2514 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2515 s->mv_dir = MV_DIR_FORWARD;
2516 s->mv_type = MV_TYPE_FIELD;
2519 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2520 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2521 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2523 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2524 &dmin, &next_block, 0, 0);
2526 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2527 s->mv_dir = MV_DIR_FORWARD;
2528 s->mv_type = MV_TYPE_16X16;
2532 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2533 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2535 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2536 s->mv_dir = MV_DIR_FORWARD;
2537 s->mv_type = MV_TYPE_8X8;
2540 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2541 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2543 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2544 &dmin, &next_block, 0, 0);
2546 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2547 s->mv_dir = MV_DIR_FORWARD;
2548 s->mv_type = MV_TYPE_16X16;
2550 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2551 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2552 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2553 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2555 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2556 s->mv_dir = MV_DIR_BACKWARD;
2557 s->mv_type = MV_TYPE_16X16;
2559 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2560 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2561 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2562 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2564 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2565 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2566 s->mv_type = MV_TYPE_16X16;
2568 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2569 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2570 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2571 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2572 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2573 &dmin, &next_block, 0, 0);
2575 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2576 s->mv_dir = MV_DIR_FORWARD;
2577 s->mv_type = MV_TYPE_FIELD;
2580 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2581 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2582 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2584 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2585 &dmin, &next_block, 0, 0);
2587 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2588 s->mv_dir = MV_DIR_BACKWARD;
2589 s->mv_type = MV_TYPE_FIELD;
2592 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2593 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2594 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2596 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2597 &dmin, &next_block, 0, 0);
2599 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2600 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2601 s->mv_type = MV_TYPE_FIELD;
2603 for(dir=0; dir<2; dir++){
2605 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2606 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2607 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2610 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2611 &dmin, &next_block, 0, 0);
2613 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2615 s->mv_type = MV_TYPE_16X16;
2619 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2620 &dmin, &next_block, 0, 0);
2621 if(s->h263_pred || s->h263_aic){
2623 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2625 ff_clean_intra_table_entries(s); //old mode?
2629 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2630 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2631 const int last_qp= backup_s.qscale;
2634 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2635 static const int dquant_tab[4]={-1,1,-2,2};
2637 assert(backup_s.dquant == 0);
2640 s->mv_dir= best_s.mv_dir;
2641 s->mv_type = MV_TYPE_16X16;
2642 s->mb_intra= best_s.mb_intra;
2643 s->mv[0][0][0] = best_s.mv[0][0][0];
2644 s->mv[0][0][1] = best_s.mv[0][0][1];
2645 s->mv[1][0][0] = best_s.mv[1][0][0];
2646 s->mv[1][0][1] = best_s.mv[1][0][1];
2648 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2649 for(; qpi<4; qpi++){
2650 int dquant= dquant_tab[qpi];
2651 qp= last_qp + dquant;
2652 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2654 backup_s.dquant= dquant;
2655 if(s->mb_intra && s->dc_val[0]){
2657 dc[i]= s->dc_val[0][ s->block_index[i] ];
2658 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2662 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2663 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2664 if(best_s.qscale != qp){
2665 if(s->mb_intra && s->dc_val[0]){
2667 s->dc_val[0][ s->block_index[i] ]= dc[i];
2668 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2675 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2676 int mx= s->b_direct_mv_table[xy][0];
2677 int my= s->b_direct_mv_table[xy][1];
2679 backup_s.dquant = 0;
2680 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2682 ff_mpeg4_set_direct_mv(s, mx, my);
2683 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2684 &dmin, &next_block, mx, my);
2686 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2687 backup_s.dquant = 0;
2688 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2690 ff_mpeg4_set_direct_mv(s, 0, 0);
2691 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2692 &dmin, &next_block, 0, 0);
2694 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2697 coded |= s->block_last_index[i];
2700 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2701 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2702 mx=my=0; //FIXME find the one we actually used
2703 ff_mpeg4_set_direct_mv(s, mx, my);
2704 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2712 s->mv_dir= best_s.mv_dir;
2713 s->mv_type = best_s.mv_type;
2715 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2716 s->mv[0][0][1] = best_s.mv[0][0][1];
2717 s->mv[1][0][0] = best_s.mv[1][0][0];
2718 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2721 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2722 &dmin, &next_block, mx, my);
2727 s->current_picture.qscale_table[xy] = best_s.qscale;
2729 copy_context_after_encode(s, &best_s, -1);
2731 pb_bits_count= put_bits_count(&s->pb);
2732 flush_put_bits(&s->pb);
2733 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2736 if(s->data_partitioning){
2737 pb2_bits_count= put_bits_count(&s->pb2);
2738 flush_put_bits(&s->pb2);
2739 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2740 s->pb2= backup_s.pb2;
2742 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2743 flush_put_bits(&s->tex_pb);
2744 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2745 s->tex_pb= backup_s.tex_pb;
2747 s->last_bits= put_bits_count(&s->pb);
2749 if (CONFIG_H263_ENCODER &&
2750 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2751 ff_h263_update_motion_val(s);
2753 if(next_block==0){ //FIXME 16 vs linesize16
2754 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2755 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2756 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2759 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2760 ff_MPV_decode_mb(s, s->block);
2762 int motion_x = 0, motion_y = 0;
2763 s->mv_type=MV_TYPE_16X16;
2764 // only one MB-Type possible
2767 case CANDIDATE_MB_TYPE_INTRA:
2770 motion_x= s->mv[0][0][0] = 0;
2771 motion_y= s->mv[0][0][1] = 0;
2773 case CANDIDATE_MB_TYPE_INTER:
2774 s->mv_dir = MV_DIR_FORWARD;
2776 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2777 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2779 case CANDIDATE_MB_TYPE_INTER_I:
2780 s->mv_dir = MV_DIR_FORWARD;
2781 s->mv_type = MV_TYPE_FIELD;
2784 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2785 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2786 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2789 case CANDIDATE_MB_TYPE_INTER4V:
2790 s->mv_dir = MV_DIR_FORWARD;
2791 s->mv_type = MV_TYPE_8X8;
2794 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2795 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2798 case CANDIDATE_MB_TYPE_DIRECT:
2799 if (CONFIG_MPEG4_ENCODER) {
2800 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2802 motion_x=s->b_direct_mv_table[xy][0];
2803 motion_y=s->b_direct_mv_table[xy][1];
2804 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2807 case CANDIDATE_MB_TYPE_DIRECT0:
2808 if (CONFIG_MPEG4_ENCODER) {
2809 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2811 ff_mpeg4_set_direct_mv(s, 0, 0);
2814 case CANDIDATE_MB_TYPE_BIDIR:
2815 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2817 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2818 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2819 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2820 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2822 case CANDIDATE_MB_TYPE_BACKWARD:
2823 s->mv_dir = MV_DIR_BACKWARD;
2825 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2826 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2828 case CANDIDATE_MB_TYPE_FORWARD:
2829 s->mv_dir = MV_DIR_FORWARD;
2831 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2832 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2834 case CANDIDATE_MB_TYPE_FORWARD_I:
2835 s->mv_dir = MV_DIR_FORWARD;
2836 s->mv_type = MV_TYPE_FIELD;
2839 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2840 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2841 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2844 case CANDIDATE_MB_TYPE_BACKWARD_I:
2845 s->mv_dir = MV_DIR_BACKWARD;
2846 s->mv_type = MV_TYPE_FIELD;
2849 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2850 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2851 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2854 case CANDIDATE_MB_TYPE_BIDIR_I:
2855 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2856 s->mv_type = MV_TYPE_FIELD;
2858 for(dir=0; dir<2; dir++){
2860 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2861 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2862 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2867 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2870 encode_mb(s, motion_x, motion_y);
2872 // RAL: Update last macroblock type
2873 s->last_mv_dir = s->mv_dir;
2875 if (CONFIG_H263_ENCODER &&
2876 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2877 ff_h263_update_motion_val(s);
2879 ff_MPV_decode_mb(s, s->block);
2882 /* clean the MV table in IPS frames for direct mode in B frames */
2883 if(s->mb_intra /* && I,P,S_TYPE */){
2884 s->p_mv_table[xy][0]=0;
2885 s->p_mv_table[xy][1]=0;
2888 if(s->flags&CODEC_FLAG_PSNR){
2892 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2893 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2895 s->current_picture.f.error[0] += sse(
2896 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2897 s->dest[0], w, h, s->linesize);
2898 s->current_picture.f.error[1] += sse(
2899 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2900 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2901 s->current_picture.f.error[2] += sse(
2902 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2903 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2906 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2907 ff_h263_loop_filter(s);
2909 av_dlog(s->avctx, "MB %d %d bits\n",
2910 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2914 //not beautiful here but we must write it before flushing so it has to be here
2915 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2916 ff_msmpeg4_encode_ext_header(s);
2920 /* Send the last GOB if RTP */
2921 if (s->avctx->rtp_callback) {
2922 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2923 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2924 /* Call the RTP callback to send the last GOB */
2926 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2932 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker slice context
 * back into the main context. MERGE(field) adds src->field into dst->field
 * and zeroes src->field (see the macro directly above).
 * NOTE(review): the closing brace of this function is missing from this
 * extracted chunk; the numeric prefix on each line is extraction residue. */
2933 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2934 MERGE(me.scene_change_score);
2935 MERGE(me.mc_mb_var_sum_temp);
2936 MERGE(me.mb_var_sum_temp);
/* Merge encoding results from a worker slice context into the main context:
 * accumulate statistics counters (via MERGE, which zeroes the source field)
 * and append the slice's byte-aligned bitstream onto the main PutBitContext.
 * NOTE(review): several lines (declarations, some MERGE entries, closing
 * braces) are missing from this extracted chunk. */
2939 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2942 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2943 MERGE(dct_count[1]);
2952 MERGE(er.error_count);
2953 MERGE(padding_bug_score);
2954 MERGE(current_picture.f.error[0]);
2955 MERGE(current_picture.f.error[1]);
2956 MERGE(current_picture.f.error[2]);
/* Noise-reduction DCT error sums are only maintained when the feature is on. */
2958 if(dst->avctx->noise_reduction){
2959 for(i=0; i<64; i++){
2960 MERGE(dct_error_sum[0][i]);
2961 MERGE(dct_error_sum[1][i]);
/* Both bitstreams must be byte aligned before concatenation. */
2965 assert(put_bits_count(&src->pb) % 8 ==0);
2966 assert(put_bits_count(&dst->pb) % 8 ==0);
2967 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2968 flush_put_bits(&dst->pb);
/* Choose the frame quality (lambda/qscale) for the current picture.
 * Priority: an explicit pending s->next_lambda, else the rate-control
 * estimate (ff_rate_estimate_qscale) unless fixed qscale is in use.
 * dry_run: when nonzero, do not consume s->next_lambda (and is passed
 * through to the rate estimator).
 * With adaptive quant enabled, codec-specific qscale cleanup runs and the
 * per-MB qscale table is initialized.
 * NOTE(review): error-return lines, some closing braces and the final
 * return are missing from this extracted chunk; presumably returns < 0 on
 * rate-control failure — confirm against the full source. */
2971 static int estimate_qp(MpegEncContext *s, int dry_run){
2972 if (s->next_lambda){
2973 s->current_picture_ptr->f.quality =
2974 s->current_picture.f.quality = s->next_lambda;
2975 if(!dry_run) s->next_lambda= 0;
2976 } else if (!s->fixed_qscale) {
2977 s->current_picture_ptr->f.quality =
2978 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2979 if (s->current_picture.f.quality < 0)
2983 if(s->adaptive_quant){
2984 switch(s->codec_id){
2985 case AV_CODEC_ID_MPEG4:
2986 if (CONFIG_MPEG4_ENCODER)
2987 ff_clean_mpeg4_qscales(s);
2989 case AV_CODEC_ID_H263:
2990 case AV_CODEC_ID_H263P:
2991 case AV_CODEC_ID_FLV1:
2992 if (CONFIG_H263_ENCODER)
2993 ff_clean_h263_qscales(s);
2996 ff_init_qscale_tab(s);
/* Adaptive quant takes the first lambda-table entry; otherwise the
 * frame-level quality chosen above is used directly. */
2999 s->lambda= s->lambda_table[0];
3002 s->lambda = s->current_picture.f.quality;
3007 /* must be called before writing the header */
/* Update temporal distances used for B-frame prediction scaling:
 * s->time is derived from the current picture's pts; for B-frames the
 * B-to-previous distance (pb_time) is computed against the surrounding
 * reference gap (pp_time), for reference frames pp_time/last_non_b_time
 * are advanced. Asserts enforce a valid pts and positive distances. */
3008 static void set_frame_distances(MpegEncContext * s){
3009 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3010 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3012 if(s->pict_type==AV_PICTURE_TYPE_B){
3013 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3014 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* Non-B (reference) frame: record the gap to the previous reference. */
3016 s->pp_time= s->time - s->last_non_b_time;
3017 s->last_non_b_time= s->time;
3018 assert(s->picture_number==0 || s->pp_time > 0);
/* Top-level per-frame encoder driver: sets up timing and rounding state,
 * estimates per-MB motion/variance across slice-thread contexts, handles
 * scene-change promotion to I-frame, selects f_code/b_code and clips long
 * MVs, runs rate control, writes the codec-specific picture header, then
 * dispatches encode_thread per slice context and merges the results.
 * NOTE(review): this extracted chunk is missing many lines (declarations,
 * braces, break/return statements, error paths); the numeric prefix on
 * each line is extraction residue, not code. Comments below only describe
 * what the visible lines establish. */
3022 static int encode_picture(MpegEncContext *s, int picture_number)
3026 int context_count = s->slice_context_count;
3028 s->picture_number = picture_number;
3030 /* Reset the average MB variance */
3031 s->me.mb_var_sum_temp =
3032 s->me.mc_mb_var_sum_temp = 0;
3034 /* we need to initialize some time vars before we can encode b-frames */
3035 // RAL: Condition added for MPEG1VIDEO
3036 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3037 set_frame_distances(s);
3038 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3039 ff_set_mpeg4_time(s);
3041 s->me.scene_change_score=0;
3043 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: fixed per picture type for MSMPEG4>=3, toggled each
 * reference frame for codecs using flip-flop rounding. */
3045 if(s->pict_type==AV_PICTURE_TYPE_I){
3046 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3047 else s->no_rounding=0;
3048 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3049 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3050 s->no_rounding ^= 1;
/* Two-pass: take qp/fcode from the stats file; otherwise seed lambda from
 * the last frame of the same kind unless a fixed qscale was requested. */
3053 if(s->flags & CODEC_FLAG_PASS2){
3054 if (estimate_qp(s,1) < 0)
3056 ff_get_2pass_fcode(s);
3057 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3058 if(s->pict_type==AV_PICTURE_TYPE_B)
3059 s->lambda= s->last_lambda_for[s->pict_type];
3061 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3065 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Replicate the main context into each slice-thread context. */
3066 for(i=1; i<context_count; i++){
3067 ret = ff_update_duplicate_context(s->thread_context[i], s);
3075 /* Estimate motion for every MB */
3076 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by the ME penalty compensation (Q8 fixed point). */
3077 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3078 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3079 if (s->pict_type != AV_PICTURE_TYPE_B) {
3080 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3081 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3085 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3086 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: every MB is intra by definition. */
3088 for(i=0; i<s->mb_stride*s->mb_height; i++)
3089 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3091 if(!s->fixed_qscale){
3092 /* finding spatial complexity for I-frame rate control */
3093 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3096 for(i=1; i<context_count; i++){
3097 merge_context_after_me(s, s->thread_context[i]);
3099 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3100 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene change detected during ME of a P frame: promote it to an I frame. */
3103 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3104 s->pict_type= AV_PICTURE_TYPE_I;
3105 for(i=0; i<s->mb_stride*s->mb_height; i++)
3106 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3107 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3108 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frame: choose f_code from the collected MVs and clip out-of-range
 * vectors; field MV tables are handled too when interlaced ME is on. */
3112 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3113 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3115 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3117 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3118 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3119 s->f_code= FFMAX3(s->f_code, a, b);
3122 ff_fix_long_p_mvs(s);
3123 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3124 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3128 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3129 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frame: pick forward (f_code) and backward (b_code) ranges over both
 * the uni-directional and bidirectional MV tables, then clip long MVs. */
3134 if(s->pict_type==AV_PICTURE_TYPE_B){
3137 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3138 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3139 s->f_code = FFMAX(a, b);
3141 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3142 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3143 s->b_code = FFMAX(a, b);
3145 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3146 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3147 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3148 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3149 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3151 for(dir=0; dir<2; dir++){
3154 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3155 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3156 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3157 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final rate-control pass for the real encode. */
3165 if (estimate_qp(s, 0) < 0)
3168 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3169 s->qscale= 3; //reduce clipping problems
3171 if (s->out_format == FMT_MJPEG) {
3172 /* for mjpeg, we do include qscale in the matrix */
3174 int j= s->dsp.idct_permutation[i];
3176 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3178 s->y_dc_scale_table=
3179 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3180 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3181 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3182 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3186 //FIXME var duplication
3187 s->current_picture_ptr->f.key_frame =
3188 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3189 s->current_picture_ptr->f.pict_type =
3190 s->current_picture.f.pict_type = s->pict_type;
3192 if (s->current_picture.f.key_frame)
3193 s->picture_in_gop_number=0;
/* Write the codec-specific picture header and count header bits.
 * NOTE(review): the case labels for this switch are missing from this
 * extracted chunk; only the per-format bodies remain visible. */
3195 s->last_bits= put_bits_count(&s->pb);
3196 switch(s->out_format) {
3198 if (CONFIG_MJPEG_ENCODER)
3199 ff_mjpeg_encode_picture_header(s);
3202 if (CONFIG_H261_ENCODER)
3203 ff_h261_encode_picture_header(s, picture_number);
3206 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3207 ff_wmv2_encode_picture_header(s, picture_number);
3208 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3209 ff_msmpeg4_encode_picture_header(s, picture_number);
3210 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3211 ff_mpeg4_encode_picture_header(s, picture_number);
3212 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3213 ff_rv10_encode_picture_header(s, picture_number);
3214 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3215 ff_rv20_encode_picture_header(s, picture_number);
3216 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3217 ff_flv_encode_picture_header(s, picture_number);
3218 else if (CONFIG_H263_ENCODER)
3219 ff_h263_encode_picture_header(s, picture_number);
3222 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3223 ff_mpeg1_encode_picture_header(s, picture_number);
3228 bits= put_bits_count(&s->pb);
3229 s->header_bits= bits - s->last_bits;
/* Encode all slices in parallel and merge their stats/bitstreams. */
3231 for(i=1; i<context_count; i++){
3232 update_duplicate_context_after_me(s->thread_context[i], s);
3234 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3235 for(i=1; i<context_count; i++){
3236 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter applied to DCT coefficients before quantisation:
 * shrinks each coefficient toward zero by the learned per-position offset
 * (s->dct_offset) while accumulating the running error sums used to update
 * those offsets, tracked separately for intra/inter blocks.
 * NOTE(review): several lines (the positive/negative branch split, clamp
 * of small levels, write-back of the filtered level, closing braces) are
 * missing from this extracted chunk. */
3242 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3243 const int intra= s->mb_intra;
3246 s->dct_count[intra]++;
3248 for(i=0; i<64; i++){
3249 int level= block[i];
/* Positive-level path: accumulate error, subtract offset, clamp at 0. */
3253 s->dct_error_sum[intra][i] += level;
3254 level -= s->dct_offset[intra][i];
3255 if(level<0) level=0;
/* Negative-level path: mirror of the above with the sign flipped. */
3257 s->dct_error_sum[intra][i] -= level;
3258 level += s->dct_offset[intra][i];
3259 if(level>0) level=0;
/* Rate-distortion optimal ("trellis") quantisation of one 8x8 block.
 * Performs the forward DCT, builds up to two candidate quantised levels
 * per coefficient, then runs a Viterbi-style survivor search over
 * (run, level) pairs minimising distortion + lambda * bits, and finally
 * writes the chosen levels back into block[] in coded order.
 * Returns the index of the last nonzero coefficient (or -1 / <start_i when
 * the block quantises to nothing); *overflow is set when a level exceeded
 * s->max_qcoeff.
 * NOTE(review): this extracted chunk is missing many lines (several local
 * declarations such as coeff[], survivor[], score_tab[], the intra/inter
 * branch headers, break statements and closing braces); the numeric prefix
 * on each line is extraction residue. Comments describe only what the
 * visible lines establish. */
3266 static int dct_quantize_trellis_c(MpegEncContext *s,
3267 int16_t *block, int n,
3268 int qscale, int *overflow){
3270 const uint8_t *scantable= s->intra_scantable.scantable;
3271 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3273 unsigned int threshold1, threshold2;
3285 int coeff_count[64];
3286 int qmul, qadd, start_i, last_non_zero, i, dc;
3287 const int esc_length= s->ac_esc_length;
3289 uint8_t * last_length;
/* lambda in the same fixed-point scale as the bit-length tables. */
3290 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3292 s->dsp.fdct (block);
3294 if(s->dct_error_sum)
3295 s->denoise_dct(s, block);
3297 qadd= ((qscale-1)|1)*8;
3308 /* For AIC we skip quant/dequant of INTRADC */
3313 /* note: block[0] is assumed to be positive */
3314 block[0] = (block[0] + (q >> 1)) / q;
/* Intra path: MPEG-style bias and intra AC VLC length tables. */
3317 qmat = s->q_intra_matrix[qscale];
3318 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3319 bias= 1<<(QMAT_SHIFT-1);
3320 length = s->intra_ac_vlc_length;
3321 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter matrix and inter AC VLC length tables. */
3325 qmat = s->q_inter_matrix[qscale];
3326 length = s->inter_ac_vlc_length;
3327 last_length= s->inter_ac_vlc_last_length;
3331 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3332 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient above threshold. */
3334 for(i=63; i>=start_i; i--) {
3335 const int j = scantable[i];
3336 int level = block[j] * qmat[j];
3338 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels: the rounded level and level-1 (mirrored for
 * negative coefficients); below threshold only +/-1 survives. */
3344 for(i=start_i; i<=last_non_zero; i++) {
3345 const int j = scantable[i];
3346 int level = block[j] * qmat[j];
3348 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3349 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3350 if(((unsigned)(level+threshold1))>threshold2){
3352 level= (bias + level)>>QMAT_SHIFT;
3354 coeff[1][i]= level-1;
3355 // coeff[2][k]= level-2;
3357 level= (bias - level)>>QMAT_SHIFT;
3358 coeff[0][i]= -level;
3359 coeff[1][i]= -level+1;
3360 // coeff[2][k]= -level+2;
3362 coeff_count[i]= FFMIN(level, 2);
3363 assert(coeff_count[i]);
3366 coeff[0][i]= (level>>31)|1;
3371 *overflow= s->max_qcoeff < max; //overflow might have happened
/* Nothing survived quantisation: clear the block and bail out. */
3373 if(last_non_zero < start_i){
3374 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3375 return last_non_zero;
3378 score_tab[start_i]= 0;
3379 survivor[0]= start_i;
/* Viterbi search: for each position try every candidate level against
 * every surviving predecessor run start, tracking the best score. */
3382 for(i=start_i; i<=last_non_zero; i++){
3383 int level_index, j, zero_distortion;
3384 int dct_coeff= FFABS(block[ scantable[i] ]);
3385 int best_score=256*256*256*120;
/* The ifast FDCT scales output by the AAN factors; undo for distortion. */
3387 if (s->dsp.fdct == ff_fdct_ifast)
3388 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3389 zero_distortion= dct_coeff*dct_coeff;
3391 for(level_index=0; level_index < coeff_count[i]; level_index++){
3393 int level= coeff[level_index][i];
3394 const int alevel= FFABS(level);
/* Reconstruct the dequantised value per output format to measure the
 * distortion this candidate would actually produce. */
3399 if(s->out_format == FMT_H263){
3400 unquant_coeff= alevel*qmul + qadd;
3402 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3404 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3405 unquant_coeff = (unquant_coeff - 1) | 1;
3407 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3408 unquant_coeff = (unquant_coeff - 1) | 1;
3413 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Small levels can use the regular VLC tables ... */
3415 if((level&(~127)) == 0){
3416 for(j=survivor_count-1; j>=0; j--){
3417 int run= i - survivor[j];
3418 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3419 score += score_tab[i-run];
3421 if(score < best_score){
3424 level_tab[i+1]= level-64;
/* H.263: also consider this coefficient as the block's last one. */
3428 if(s->out_format == FMT_H263){
3429 for(j=survivor_count-1; j>=0; j--){
3430 int run= i - survivor[j];
3431 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3432 score += score_tab[i-run];
3433 if(score < last_score){
3436 last_level= level-64;
/* ... large levels must use the escape code. */
3442 distortion += esc_length*lambda;
3443 for(j=survivor_count-1; j>=0; j--){
3444 int run= i - survivor[j];
3445 int score= distortion + score_tab[i-run];
3447 if(score < best_score){
3450 level_tab[i+1]= level-64;
3454 if(s->out_format == FMT_H263){
3455 for(j=survivor_count-1; j>=0; j--){
3456 int run= i - survivor[j];
3457 int score= distortion + score_tab[i-run];
3458 if(score < last_score){
3461 last_level= level-64;
3469 score_tab[i+1]= best_score;
/* Prune dominated survivors; a wider margin is needed early because of
 * the MPEG-4 VLC anomaly noted below. */
3471 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3472 if(last_non_zero <= 27){
3473 for(; survivor_count; survivor_count--){
3474 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3478 for(; survivor_count; survivor_count--){
3479 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3484 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats: choose the best termination point explicitly. */
3487 if(s->out_format != FMT_H263){
3488 last_score= 256*256*256*120;
3489 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3490 int score= score_tab[i];
3491 if(i) score += lambda*2; //FIXME exacter?
3493 if(score < last_score){
3496 last_level= level_tab[i];
3497 last_run= run_tab[i];
3502 s->coded_score[n] = last_score;
3504 dc= FFABS(block[0]);
3505 last_non_zero= last_i - 1;
3506 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3508 if(last_non_zero < start_i)
3509 return last_non_zero;
/* DC-only block: pick the best single coefficient (or drop it). */
3511 if(last_non_zero == 0 && start_i == 0){
3513 int best_score= dc * dc;
3515 for(i=0; i<coeff_count[0]; i++){
3516 int level= coeff[i][0];
3517 int alevel= FFABS(level);
3518 int unquant_coeff, score, distortion;
3520 if(s->out_format == FMT_H263){
3521 unquant_coeff= (alevel*qmul + qadd)>>3;
3523 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3524 unquant_coeff = (unquant_coeff - 1) | 1;
3526 unquant_coeff = (unquant_coeff + 4) >> 3;
3527 unquant_coeff<<= 3 + 3;
3529 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3531 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3532 else score= distortion + esc_length*lambda;
3534 if(score < best_score){
3536 best_level= level - 64;
3539 block[0]= best_level;
3540 s->coded_score[n] = best_score - dc*dc;
3541 if(best_level == 0) return -1;
3542 else return last_non_zero;
/* Walk the chosen path backwards, writing levels into coded positions. */
3548 block[ perm_scantable[last_non_zero] ]= last_level;
3551 for(; i>start_i; i -= run_tab[i] + 1){
3552 block[ perm_scantable[i-1] ]= level_tab[i];
3555 return last_non_zero;
3558 //#define REFINE_STATS 1
3559 static int16_t basis[64][64];
/* Precompute the 8x8 IDCT basis functions (scaled by BASIS_SHIFT fixed
 * point, with the DCT-II sqrt(0.5) normalisation for the DC row/column)
 * into the permuted order given by perm, for use by dct_quantize_refine.
 * NOTE(review): the nested loop headers over i/j/x/y and the index
 * computation are missing from this extracted chunk. */
3561 static void build_basis(uint8_t *perm){
3568 double s= 0.25*(1<<BASIS_SHIFT);
3570 int perm_index= perm[index];
3571 if(i==0) s*= sqrt(0.5);
3572 if(j==0) s*= sqrt(0.5);
3573 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Noise-shaping refinement pass over an already-quantized 8x8 block
 * (quantizer_noise_shaping): greedily tweaks individual quantized levels
 * by +-1 and keeps a change when it lowers the rate-distortion score
 * rate*lambda + SSD, where SSD is tracked incrementally in the spatial
 * domain via the basis[][] table and dsp.try_8x8basis()/add_8x8basis().
 * @param block  quantized coefficients (permuted order), modified in place
 * @param weight per-coefficient perceptual weights
 * @param orig   original (unquantized-domain) reference samples
 * @return index of the last nonzero coefficient, as block_last_index
 * NOTE(review): many interior lines (loop closings, rem[]/run bookkeeping,
 * the refinement-loop head) are elided in this view; comments below are
 * limited to what the visible lines establish. */
3580 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3581 int16_t *block, int16_t *weight, int16_t *orig,
3584 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3585 const uint8_t *scantable= s->intra_scantable.scantable;
3586 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3587 // unsigned int threshold1, threshold2;
3592 int qmul, qadd, start_i, last_non_zero, i, dc;
3594 uint8_t * last_length;
3596 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (debug only; static => not thread safe) */
3599 static int after_last=0;
3600 static int to_zero=0;
3601 static int from_zero=0;
3604 static int messed_sign=0;
/* lazily build the DCT basis table on first use */
3607 if(basis[0][0] == 0)
3608 build_basis(s->dsp.idct_permutation);
3619 /* For AIC we skip quant/dequant of INTRADC */
3623 q <<= RECON_SHIFT-3;
3624 /* note: block[0] is assumed to be positive */
3626 // block[0] = (block[0] + (q >> 1)) / q;
3628 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3629 // bias= 1<<(QMAT_SHIFT-1);
/* pick the AC VLC length tables matching intra/inter coding */
3630 length = s->intra_ac_vlc_length;
3631 last_length= s->intra_ac_vlc_last_length;
3635 length = s->inter_ac_vlc_length;
3636 last_length= s->inter_ac_vlc_last_length;
3638 last_non_zero = s->block_last_index[n];
3643 dc += (1<<(RECON_SHIFT-1)); /* rounding offset for the RECON_SHIFT fixed point */
3644 for(i=0; i<64; i++){
/* rem[] holds the reconstruction error to be minimized */
3645 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3648 STOP_TIMER("memset rem[]")}
3651 for(i=0; i<64; i++){
3656 w= FFABS(weight[i]) + qns*one;
3657 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3660 // w=weight[i] = (63*qns + (w/2)) / w;
3666 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* subtract the contribution of every currently-coded coefficient from
 * rem[] and record the run-lengths between them in run_tab[] */
3672 for(i=start_i; i<=last_non_zero; i++){
3673 int j= perm_scantable[i];
3674 const int level= block[j];
3678 if(level<0) coeff= qmul*level - qadd;
3679 else coeff= qmul*level + qadd;
3680 run_tab[rle_index++]=run;
3683 s->dsp.add_8x8basis(rem, basis[j], coeff);
3689 if(last_non_zero>0){
3690 STOP_TIMER("init rem[]")
/* baseline score: distortion of rem[] with no further change */
3697 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3700 int run2, best_unquant_change=0, analyze_gradient;
3704 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3706 if(analyze_gradient){
/* d1[] = weighted error gradient, used below to steer the sign of
 * newly created +-1 coefficients */
3710 for(i=0; i<64; i++){
3713 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3716 STOP_TIMER("rem*w*w")}
/* try tweaking the intra DC coefficient by +-1 */
3726 const int level= block[0];
3727 int change, old_coeff;
3729 assert(s->mb_intra);
3733 for(change=-1; change<=1; change+=2){
3734 int new_level= level + change;
3735 int score, new_coeff;
3737 new_coeff= q*new_level;
3738 if(new_coeff >= 2048 || new_coeff < 0) /* dequantized DC must stay in [0,2048) */
3741 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3742 if(score<best_score){
3745 best_change= change;
3746 best_unquant_change= new_coeff - old_coeff;
3753 run2= run_tab[rle_index++];
/* try tweaking each AC coefficient by +-1, scoring VLC-bit delta plus
 * weighted-distortion delta */
3757 for(i=start_i; i<64; i++){
3758 int j= perm_scantable[i];
3759 const int level= block[j];
3760 int change, old_coeff;
3762 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3766 if(level<0) old_coeff= qmul*level - qadd;
3767 else old_coeff= qmul*level + qadd;
3768 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3772 assert(run2>=0 || i >= last_non_zero );
3775 for(change=-1; change<=1; change+=2){
3776 int new_level= level + change;
3777 int score, new_coeff, unquant_change;
3780 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3784 if(new_level<0) new_coeff= qmul*new_level - qadd;
3785 else new_coeff= qmul*new_level + qadd;
3786 if(new_coeff >= 2048 || new_coeff <= -2048) /* keep dequantized value in coder range */
3788 //FIXME check for overflow
/* case 1: level stays nonzero — only the (run,level) code length changes */
3791 if(level < 63 && level > -63){
3792 if(i < last_non_zero)
3793 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3794 - length[UNI_AC_ENC_INDEX(run, level+64)];
3796 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3797 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
3800 assert(FFABS(new_level)==1);
/* case 2: a zero becomes +-1 — insertion splits the following run */
3802 if(analyze_gradient){
3803 int g= d1[ scantable[i] ];
3804 if(g && (g^new_level) >= 0) /* only create a coeff whose sign matches the error gradient */
3808 if(i < last_non_zero){
3809 int next_i= i + run2 + 1;
3810 int next_level= block[ perm_scantable[next_i] ] + 64;
3812 if(next_level&(~127)) /* next level outside the +-63 table range */
3815 if(next_i < last_non_zero)
3816 score += length[UNI_AC_ENC_INDEX(run, 65)]
3817 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3818 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3820 score += length[UNI_AC_ENC_INDEX(run, 65)]
3821 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3822 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3824 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3826 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3827 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3833 assert(FFABS(level)==1);
/* case 3: a +-1 becomes zero — removal merges the surrounding runs */
3835 if(i < last_non_zero){
3836 int next_i= i + run2 + 1;
3837 int next_level= block[ perm_scantable[next_i] ] + 64;
3839 if(next_level&(~127))
3842 if(next_i < last_non_zero)
3843 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3844 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3845 - length[UNI_AC_ENC_INDEX(run, 65)];
3847 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3848 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3849 - length[UNI_AC_ENC_INDEX(run, 65)];
3851 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3853 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3854 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* add the distortion delta and keep the best candidate */
3861 unquant_change= new_coeff - old_coeff;
3862 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3864 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3865 if(score<best_score){
3868 best_change= change;
3869 best_unquant_change= unquant_change;
3873 prev_level= level + 64;
3874 if(prev_level&(~127))
3883 STOP_TIMER("iterative step")}
/* apply the winning +-1 change and update the bookkeeping */
3887 int j= perm_scantable[ best_coeff ];
3889 block[j] += best_change;
3891 if(best_coeff > last_non_zero){
3892 last_non_zero= best_coeff;
3900 if(block[j] - best_change){ /* coefficient existed before the change */
3901 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* a trailing coefficient went to zero: shrink last_non_zero */
3913 for(; last_non_zero>=start_i; last_non_zero--){
3914 if(block[perm_scantable[last_non_zero]])
/* REFINE_STATS periodic dump (debug only) */
3920 if(256*256*256*64 % count == 0){
3921 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* rebuild run_tab[] for the next iteration */
3926 for(i=start_i; i<=last_non_zero; i++){
3927 int j= perm_scantable[i];
3928 const int level= block[j];
3931 run_tab[rle_index++]=run;
/* fold the accepted change into the residual */
3938 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3944 if(last_non_zero>0){
3945 STOP_TIMER("iterative search")
3950 return last_non_zero;
/* Reference C implementation of forward DCT + quantization for one 8x8
 * block.
 * @param block    spatial-domain samples in, quantized coefficients out
 * @param n        block index (selects luma/chroma handling upstream)
 * @param qscale   quantizer scale, indexes the precomputed qmat tables
 * @param overflow set to nonzero when a coefficient may exceed max_qcoeff
 * @return index of the last nonzero coefficient (block_last_index)
 * NOTE(review): several interior lines (intra DC 'q' selection, scan-loop
 * bodies, max tracking) are elided in this view of the file. */
3953 int ff_dct_quantize_c(MpegEncContext *s,
3954 int16_t *block, int n,
3955 int qscale, int *overflow)
3957 int i, j, level, last_non_zero, q, start_i;
3959 const uint8_t *scantable= s->intra_scantable.scantable;
3962 unsigned int threshold1, threshold2;
3964 s->dsp.fdct (block); /* forward DCT in place */
/* optional DCT-domain denoising before quantization */
3966 if(s->dct_error_sum)
3967 s->denoise_dct(s, block);
3977 /* For AIC we skip quant/dequant of INTRADC */
3980 /* note: block[0] is assumed to be positive */
3981 block[0] = (block[0] + (q >> 1)) / q;
/* select quant matrix and rounding bias for intra vs inter blocks */
3984 qmat = s->q_intra_matrix[qscale];
3985 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3989 qmat = s->q_inter_matrix[qscale];
3990 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3992 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3993 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that survives quantization */
3994 for(i=63;i>=start_i;i--) {
3996 level = block[j] * qmat[j];
/* unsigned trick: true iff |level| > threshold1, i.e. quantizes to nonzero */
3998 if(((unsigned)(level+threshold1))>threshold2){
/* quantize the surviving coefficients in scan order */
4005 for(i=start_i; i<=last_non_zero; i++) {
4007 level = block[j] * qmat[j];
4009 // if( bias+level >= (1<<QMAT_SHIFT)
4010 // || bias-level >= (1<<QMAT_SHIFT)){
4011 if(((unsigned)(level+threshold1))>threshold2){
4013 level= (bias + level)>>QMAT_SHIFT;
4016 level= (bias - level)>>QMAT_SHIFT;
4024 *overflow= s->max_qcoeff < max; //overflow might have happened
4026 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4027 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4028 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4030 return last_non_zero;
/* Helpers for the AVOption tables below: OFFSET() locates a field inside
 * MpegEncContext, VE marks options as video+encoding parameters. */
4033 #define OFFSET(x) offsetof(MpegEncContext, x)
4034 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private encoder options for the baseline H.263 encoder.
 * NOTE(review): the { NULL } terminator entry is elided in this view. */
4035 static const AVOption h263_options[] = {
4036 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4037 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4038 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOption API. */
4043 static const AVClass h263_class = {
4044 .class_name = "H.263 encoder",
4045 .item_name = av_default_item_name,
4046 .option = h263_options,
4047 .version = LIBAVUTIL_VERSION_INT,
/* Registration of the baseline H.263 encoder; all callbacks are the
 * shared MPV entry points defined earlier in this file. */
4050 AVCodec ff_h263_encoder = {
4052 .type = AVMEDIA_TYPE_VIDEO,
4053 .id = AV_CODEC_ID_H263,
4054 .priv_data_size = sizeof(MpegEncContext),
4055 .init = ff_MPV_encode_init,
4056 .encode2 = ff_MPV_encode_picture,
4057 .close = ff_MPV_encode_end,
4058 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4059 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4060 .priv_class = &h263_class,
/* Private encoder options for H.263+ (H.263 version 2).
 * NOTE(review): the { NULL } terminator entry is elided in this view. */
4063 static const AVOption h263p_options[] = {
4064 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4065 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4066 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4067 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOption API. */
4071 static const AVClass h263p_class = {
4072 .class_name = "H.263p encoder",
4073 .item_name = av_default_item_name,
4074 .option = h263p_options,
4075 .version = LIBAVUTIL_VERSION_INT,
/* Registration of the H.263+ encoder; unlike baseline H.263 it also
 * advertises slice-threading capability. */
4078 AVCodec ff_h263p_encoder = {
4080 .type = AVMEDIA_TYPE_VIDEO,
4081 .id = AV_CODEC_ID_H263P,
4082 .priv_data_size = sizeof(MpegEncContext),
4083 .init = ff_MPV_encode_init,
4084 .encode2 = ff_MPV_encode_picture,
4085 .close = ff_MPV_encode_end,
4086 .capabilities = CODEC_CAP_SLICE_THREADS,
4087 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4088 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4089 .priv_class = &h263p_class,
/* Generic MPV AVClass (defines msmpeg4v2_class with the shared
 * ff_mpv_generic_options) plus the MS-MPEG4 v2 encoder registration. */
4092 FF_MPV_GENERIC_CLASS(msmpeg4v2)

4094 AVCodec ff_msmpeg4v2_encoder = {
4095 .name = "msmpeg4v2",
4096 .type = AVMEDIA_TYPE_VIDEO,
4097 .id = AV_CODEC_ID_MSMPEG4V2,
4098 .priv_data_size = sizeof(MpegEncContext),
4099 .init = ff_MPV_encode_init,
4100 .encode2 = ff_MPV_encode_picture,
4101 .close = ff_MPV_encode_end,
4102 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4103 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4104 .priv_class = &msmpeg4v2_class,
/* Generic MPV AVClass plus the MS-MPEG4 v3 (a.k.a. DivX 3) encoder
 * registration. */
4107 FF_MPV_GENERIC_CLASS(msmpeg4v3)

4109 AVCodec ff_msmpeg4v3_encoder = {
4111 .type = AVMEDIA_TYPE_VIDEO,
4112 .id = AV_CODEC_ID_MSMPEG4V3,
4113 .priv_data_size = sizeof(MpegEncContext),
4114 .init = ff_MPV_encode_init,
4115 .encode2 = ff_MPV_encode_picture,
4116 .close = ff_MPV_encode_end,
4117 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4118 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4119 .priv_class = &msmpeg4v3_class,
/* Generic MPV AVClass plus the WMV1 (Windows Media Video 7) encoder
 * registration. */
4122 FF_MPV_GENERIC_CLASS(wmv1)

4124 AVCodec ff_wmv1_encoder = {
4126 .type = AVMEDIA_TYPE_VIDEO,
4127 .id = AV_CODEC_ID_WMV1,
4128 .priv_data_size = sizeof(MpegEncContext),
4129 .init = ff_MPV_encode_init,
4130 .encode2 = ff_MPV_encode_picture,
4131 .close = ff_MPV_encode_end,
4132 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4133 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4134 .priv_class = &wmv1_class,