2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
39 #include "mpegvideo.h"
47 #include "aandcttab.h"
49 #include "mpeg4video.h"
51 #include "bytestream.h"
/* Forward declarations for encoder-internal helpers defined later in this
 * file (quantization, distortion measurement, and per-picture encoding). */
57 static int encode_picture(MpegEncContext *s, int picture_number);
58 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
59 static int sse_mb(MpegEncContext *s);
60 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
61 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Shared default motion-estimation tables, filled in by
 * MPV_encode_defaults(); mv entries are indexed by (mv + MAX_MV). */
68 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
69 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-based encoders.
 * NOTE(review): the option entries and terminator are elided in this view. */
71 const AVOption ff_mpv_generic_options[] = {
/**
 * Precompute the quantization multiplier tables for a range of qscales.
 *
 * For each qscale in [qmin, qmax] this fills:
 *   qmat   - 32-bit reciprocal multipliers ((1 << QMAT_SHIFT) / divisor),
 *            with the divisor depending on which fdct implementation is in
 *            use (the ifast path folds in ff_aanscales);
 *   qmat16 - 16-bit multiplier/bias pairs for the MMX-style quantizer,
 *            clamped so the multiplier never hits 0 or 128*256.
 * The trailing loop checks that (max coeff * qmat) cannot overflow INT_MAX
 * and warns if QMAT_SHIFT is too large.
 *
 * NOTE(review): several lines (braces, shift handling, else-branches) are
 * elided in this view; comments below describe only what is visible.
 */
76 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
77 uint16_t (*qmat16)[2][64],
78 const uint16_t *quant_matrix,
79 int bias, int qmin, int qmax, int intra)
84 for (qscale = qmin; qscale <= qmax; qscale++) {
// Accurate-IDCT fdcts: quantizer divides by qscale * quant_matrix only.
86 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
87 dsp->fdct == ff_jpeg_fdct_islow_10 ||
88 dsp->fdct == ff_faandct) {
89 for (i = 0; i < 64; i++) {
// j maps natural order to the IDCT's coefficient permutation.
90 const int j = dsp->idct_permutation[i];
91 /* 16 <= qscale * quant_matrix[i] <= 7905
92 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
93 * 19952 <= x <= 249205026
94 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
95 * 3444240 >= (1 << 36) / (x) >= 275 */
97 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
98 (qscale * quant_matrix[j]));
// ifast fdct leaves AAN scaling in the coefficients, so fold
// ff_aanscales into the divisor (hence the extra +14 shift).
100 } else if (dsp->fdct == ff_fdct_ifast) {
101 for (i = 0; i < 64; i++) {
102 const int j = dsp->idct_permutation[i];
103 /* 16 <= qscale * quant_matrix[i] <= 7905
104 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
105 * 19952 <= x <= 249205026
106 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
107 * 3444240 >= (1 << 36) / (x) >= 275 */
109 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
110 (ff_aanscales[i] * qscale *
// Generic path: also build the 16-bit MMX multiplier/bias tables.
114 for (i = 0; i < 64; i++) {
115 const int j = dsp->idct_permutation[i];
116 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
117 * Assume x = qscale * quant_matrix[i]
119 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
120 * so 32768 >= (1 << 19) / (x) >= 67 */
121 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
122 (qscale * quant_matrix[j]));
123 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
124 // (qscale * quant_matrix[i]);
125 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
126 (qscale * quant_matrix[j]);
// Keep the 16-bit multiplier strictly inside (0, 32768).
128 if (qmat16[qscale][0][i] == 0 ||
129 qmat16[qscale][0][i] == 128 * 256)
130 qmat16[qscale][0][i] = 128 * 256 - 1;
131 qmat16[qscale][1][i] =
132 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
133 qmat16[qscale][0][i]);
// Overflow guard: start at 'intra' so the DC coeff is skipped for
// intra matrices (it is handled separately by the quantizer).
137 for (i = intra; i < 64; i++) {
139 if (dsp->fdct == ff_fdct_ifast) {
140 max = (8191LL * ff_aanscales[i]) >> 14;
142 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
148 av_log(NULL, AV_LOG_INFO,
149 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive qscale (and lambda2) from the current lambda.
 * qscale ~= lambda / FF_QP2LAMBDA with rounding, then clipped to the
 * user-configured [qmin, qmax] range. */
154 static inline void update_qscale(MpegEncContext *s)
156 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
157 (FF_LAMBDA_SHIFT + 7);
158 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
// lambda2 = lambda^2 rescaled back to FF_LAMBDA_SHIFT precision (rounded).
160 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per
 * coefficient, in zigzag scan order as the MPEG syntax requires. */
164 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
170 for (i = 0; i < 64; i++) {
171 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
178 * init s->current_picture.qscale_table from s->lambda_table
180 void ff_init_qscale_tab(MpegEncContext *s)
182 int8_t * const qscale_table = s->current_picture.qscale_table;
// Convert each macroblock's lambda to a qp (same 139/2^7 scaling as
// update_qscale()) and clip it to the configured qp range.
185 for (i = 0; i < s->mb_num; i++) {
186 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
187 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
188 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy per-frame metadata (type, quality, numbering, interlacing flags)
 * from src to dst without touching the picture data itself. */
193 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst,
196 dst->pict_type = src->pict_type;
197 dst->quality = src->quality;
198 dst->coded_picture_number = src->coded_picture_number;
199 dst->display_picture_number = src->display_picture_number;
200 //dst->reference = src->reference;
202 dst->interlaced_frame = src->interlaced_frame;
203 dst->top_field_first = src->top_field_first;
/* Propagate fields from the main context (src) into a slice-thread
 * duplicate context (dst) after motion estimation has run. */
206 static void update_duplicate_context_after_me(MpegEncContext *dst,
209 #define COPY(a) dst->a= src->a
211 COPY(current_picture);
217 COPY(picture_in_gop_number);
218 COPY(gop_picture_number);
219 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
220 COPY(progressive_frame); // FIXME don't set in encode_header
221 COPY(partitioned_frame); // FIXME don't set in encode_header
226 * Set the given MpegEncContext to defaults for encoding.
227 * the changed fields will not depend upon the prior state of the MpegEncContext.
229 static void MPV_encode_defaults(MpegEncContext *s)
232 ff_MPV_common_defaults(s);
// Default fcode table: mark small motion vectors (|mv| < 16) as codable
// with fcode 1; the rest of the table setup is elided in this view.
234 for (i = -16; i < 16; i++) {
235 default_fcode_tab[i + MAX_MV] = 1;
237 s->me.mv_penalty = default_mv_penalty;
238 s->fcode_tab = default_fcode_tab;
241 /* init video encoder */
/**
 * Initialize the mpegvideo encoder: validate the codec/pixel-format
 * combination and the user options, configure per-codec output formats,
 * set up quantization matrices and rate control.
 *
 * NOTE(review): many error-return statements and closing braces are elided
 * in this view; each validation av_log() is presumably followed by an
 * error return in the full source — verify against upstream.
 */
242 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
244 MpegEncContext *s = avctx->priv_data;
246 int chroma_h_shift, chroma_v_shift;
248 MPV_encode_defaults(s);
/* Per-codec pixel format validation. */
250 switch (avctx->codec_id) {
251 case AV_CODEC_ID_MPEG2VIDEO:
252 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
253 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
254 av_log(avctx, AV_LOG_ERROR,
255 "only YUV420 and YUV422 are supported\n");
// LJPEG additionally allows 444/BGRA; plain YUV only when the user
// opted into unofficial compliance.
259 case AV_CODEC_ID_LJPEG:
260 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
261 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
262 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
263 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
264 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
265 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
266 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
267 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
268 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
272 case AV_CODEC_ID_MJPEG:
273 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
274 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
275 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
276 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
277 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
278 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
// Default case (other codecs): 420 only.
283 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
284 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Record the chroma format implied by the pixel format. */
289 switch (avctx->pix_fmt) {
290 case AV_PIX_FMT_YUVJ422P:
291 case AV_PIX_FMT_YUV422P:
292 s->chroma_format = CHROMA_422;
294 case AV_PIX_FMT_YUVJ420P:
295 case AV_PIX_FMT_YUV420P:
297 s->chroma_format = CHROMA_420;
/* Copy the user-facing AVCodecContext settings into the context. */
301 s->bit_rate = avctx->bit_rate;
302 s->width = avctx->width;
303 s->height = avctx->height;
304 if (avctx->gop_size > 600 &&
305 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
306 av_log(avctx, AV_LOG_ERROR,
307 "Warning keyframe interval too large! reducing it ...\n");
308 avctx->gop_size = 600;
310 s->gop_size = avctx->gop_size;
312 s->flags = avctx->flags;
313 s->flags2 = avctx->flags2;
314 s->max_b_frames = avctx->max_b_frames;
315 s->codec_id = avctx->codec->id;
316 s->strict_std_compliance = avctx->strict_std_compliance;
317 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
318 s->mpeg_quant = avctx->mpeg_quant;
319 s->rtp_mode = !!avctx->rtp_payload_size;
320 s->intra_dc_precision = avctx->intra_dc_precision;
321 s->user_specified_pts = AV_NOPTS_VALUE;
323 if (s->gop_size <= 1) {
330 s->me_method = avctx->me_method;
333 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
// Adaptive quantization is enabled if any masking option or QP-RD asks
// for it (trailing condition elided in this view).
335 s->adaptive_quant = (s->avctx->lumi_masking ||
336 s->avctx->dark_masking ||
337 s->avctx->temporal_cplx_masking ||
338 s->avctx->spatial_cplx_masking ||
339 s->avctx->p_masking ||
340 s->avctx->border_masking ||
341 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
344 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* Rate-control parameter sanity checks. */
346 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
347 av_log(avctx, AV_LOG_ERROR,
348 "a vbv buffer size is needed, "
349 "for encoding with a maximum bitrate\n");
353 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
354 av_log(avctx, AV_LOG_INFO,
355 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
358 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
359 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
363 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
364 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
368 if (avctx->rc_max_rate &&
369 avctx->rc_max_rate == avctx->bit_rate &&
370 avctx->rc_max_rate != avctx->rc_min_rate) {
371 av_log(avctx, AV_LOG_INFO,
372 "impossible bitrate constraints, this will fail\n");
// VBV buffer must hold at least one frame's worth of bits.
375 if (avctx->rc_buffer_size &&
376 avctx->bit_rate * (int64_t)avctx->time_base.num >
377 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
378 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
382 if (!s->fixed_qscale &&
383 avctx->bit_rate * av_q2d(avctx->time_base) >
384 avctx->bit_rate_tolerance) {
385 av_log(avctx, AV_LOG_ERROR,
386 "bitrate tolerance too small for bitrate\n");
// MPEG-1/2 CBR: vbv_delay is a 16-bit 90kHz field, so a too-large
// buffer cannot be signalled and the stream degrades to VBR.
390 if (s->avctx->rc_max_rate &&
391 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
392 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
393 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
394 90000LL * (avctx->rc_buffer_size - 1) >
395 s->avctx->rc_max_rate * 0xFFFFLL) {
396 av_log(avctx, AV_LOG_INFO,
397 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
398 "specified vbv buffer is too large for the given bitrate!\n");
/* Feature/codec compatibility checks. */
401 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
402 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
403 s->codec_id != AV_CODEC_ID_FLV1) {
404 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
408 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
409 av_log(avctx, AV_LOG_ERROR,
410 "OBMC is only supported with simple mb decision\n");
414 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
415 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
419 if (s->max_b_frames &&
420 s->codec_id != AV_CODEC_ID_MPEG4 &&
421 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
422 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
423 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
// These codecs store the pixel aspect ratio in 8 bits per component.
427 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
428 s->codec_id == AV_CODEC_ID_H263 ||
429 s->codec_id == AV_CODEC_ID_H263P) &&
430 (avctx->sample_aspect_ratio.num > 255 ||
431 avctx->sample_aspect_ratio.den > 255)) {
432 av_log(avctx, AV_LOG_ERROR,
433 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
434 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
438 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
439 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
440 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
444 // FIXME mpeg2 uses that too
445 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
446 av_log(avctx, AV_LOG_ERROR,
447 "mpeg2 style quantization not supported by codec\n");
451 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
452 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
456 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
457 s->avctx->mb_decision != FF_MB_DECISION_RD) {
458 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
462 if (s->avctx->scenechange_threshold < 1000000000 &&
463 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
464 av_log(avctx, AV_LOG_ERROR,
465 "closed gop with scene change detection are not supported yet, "
466 "set threshold to 1000000000\n");
470 if (s->flags & CODEC_FLAG_LOW_DELAY) {
471 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
472 av_log(avctx, AV_LOG_ERROR,
473 "low delay forcing is only available for mpeg2\n");
476 if (s->max_b_frames != 0) {
477 av_log(avctx, AV_LOG_ERROR,
478 "b frames cannot be used with low delay\n");
483 if (s->q_scale_type == 1) {
484 if (avctx->qmax > 12) {
485 av_log(avctx, AV_LOG_ERROR,
486 "non linear quant only supports qmax <= 12 currently\n");
/* Threading support is limited to a few codecs. */
491 if (s->avctx->thread_count > 1 &&
492 s->codec_id != AV_CODEC_ID_MPEG4 &&
493 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
494 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
495 (s->codec_id != AV_CODEC_ID_H263P)) {
496 av_log(avctx, AV_LOG_ERROR,
497 "multi threaded encoding not supported by codec\n");
501 if (s->avctx->thread_count < 1) {
502 av_log(avctx, AV_LOG_ERROR,
503 "automatic thread number detection not supported by codec,"
508 if (s->avctx->thread_count > 1)
511 if (!avctx->time_base.den || !avctx->time_base.num) {
512 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
// Upper bound on mb_threshold so later fixed-point math cannot overflow.
516 i = (INT_MAX / 2 + 128) >> 8;
517 if (avctx->mb_threshold >= i) {
518 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
523 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
524 av_log(avctx, AV_LOG_INFO,
525 "notice: b_frame_strategy only affects the first pass\n");
526 avctx->b_frame_strategy = 0;
// Reduce the timebase fraction so codec headers can represent it.
529 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
531 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
532 avctx->time_base.den /= i;
533 avctx->time_base.num /= i;
/* Default quantizer rounding biases; MPEG-1/2/MJPEG round differently
 * from the H.263-family codecs. */
537 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
538 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
539 // (a + x * 3 / 8) / x
540 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
541 s->inter_quant_bias = 0;
543 s->intra_quant_bias = 0;
545 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
548 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
549 s->intra_quant_bias = avctx->intra_quant_bias;
550 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
551 s->inter_quant_bias = avctx->inter_quant_bias;
553 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
// MPEG-4 stores the timebase denominator in a 16-bit field.
556 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
557 s->avctx->time_base.den > (1 << 16) - 1) {
558 av_log(avctx, AV_LOG_ERROR,
559 "timebase %d/%d not supported by MPEG 4 standard, "
560 "the maximum admitted value for the timebase denominator "
561 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
565 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* Per-codec output-format configuration. */
567 switch (avctx->codec->id) {
568 case AV_CODEC_ID_MPEG1VIDEO:
569 s->out_format = FMT_MPEG1;
570 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
571 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
573 case AV_CODEC_ID_MPEG2VIDEO:
574 s->out_format = FMT_MPEG1;
575 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
576 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
579 case AV_CODEC_ID_LJPEG:
580 case AV_CODEC_ID_MJPEG:
581 s->out_format = FMT_MJPEG;
582 s->intra_only = 1; /* force intra only for jpeg */
// RGB LJPEG uses 1x1 sampling for all components; otherwise derive
// the JPEG sampling factors from the chroma subsampling shifts.
583 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
584 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
585 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
586 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
587 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
589 s->mjpeg_vsample[0] = 2;
590 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
591 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
592 s->mjpeg_hsample[0] = 2;
593 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
594 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
596 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
597 ff_mjpeg_encode_init(s) < 0)
602 case AV_CODEC_ID_H261:
603 if (!CONFIG_H261_ENCODER)
605 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
606 av_log(avctx, AV_LOG_ERROR,
607 "The specified picture size of %dx%d is not valid for the "
608 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
609 s->width, s->height);
612 s->out_format = FMT_H261;
616 case AV_CODEC_ID_H263:
617 if (!CONFIG_H263_ENCODER)
619 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
620 s->width, s->height) == 8) {
621 av_log(avctx, AV_LOG_INFO,
622 "The specified picture size of %dx%d is not valid for "
623 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
624 "352x288, 704x576, and 1408x1152."
625 "Try H.263+.\n", s->width, s->height);
628 s->out_format = FMT_H263;
632 case AV_CODEC_ID_H263P:
633 s->out_format = FMT_H263;
636 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
637 s->modified_quant = s->h263_aic;
638 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
639 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
642 /* These are just to be sure */
646 case AV_CODEC_ID_FLV1:
647 s->out_format = FMT_H263;
648 s->h263_flv = 2; /* format = 1; 11-bit codes */
649 s->unrestricted_mv = 1;
650 s->rtp_mode = 0; /* don't allow GOB */
654 case AV_CODEC_ID_RV10:
655 s->out_format = FMT_H263;
659 case AV_CODEC_ID_RV20:
660 s->out_format = FMT_H263;
663 s->modified_quant = 1;
667 s->unrestricted_mv = 0;
669 case AV_CODEC_ID_MPEG4:
670 s->out_format = FMT_H263;
672 s->unrestricted_mv = 1;
673 s->low_delay = s->max_b_frames ? 0 : 1;
674 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
676 case AV_CODEC_ID_MSMPEG4V2:
677 s->out_format = FMT_H263;
679 s->unrestricted_mv = 1;
680 s->msmpeg4_version = 2;
684 case AV_CODEC_ID_MSMPEG4V3:
685 s->out_format = FMT_H263;
687 s->unrestricted_mv = 1;
688 s->msmpeg4_version = 3;
689 s->flipflop_rounding = 1;
693 case AV_CODEC_ID_WMV1:
694 s->out_format = FMT_H263;
696 s->unrestricted_mv = 1;
697 s->msmpeg4_version = 4;
698 s->flipflop_rounding = 1;
702 case AV_CODEC_ID_WMV2:
703 s->out_format = FMT_H263;
705 s->unrestricted_mv = 1;
706 s->msmpeg4_version = 5;
707 s->flipflop_rounding = 1;
715 avctx->has_b_frames = !s->low_delay;
719 s->progressive_frame =
720 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
721 CODEC_FLAG_INTERLACED_ME) ||
/* Allocate shared mpegvideo state, then hook up quantize functions. */
725 if (ff_MPV_common_init(s) < 0)
729 ff_MPV_encode_init_x86(s);
731 if (!s->dct_quantize)
732 s->dct_quantize = ff_dct_quantize_c;
734 s->denoise_dct = denoise_dct_c;
// Keep the plain quantizer available even when trellis replaces the
// default dct_quantize below.
735 s->fast_dct_quantize = s->dct_quantize;
737 s->dct_quantize = dct_quantize_trellis_c;
739 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
740 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
742 s->quant_precision = 5;
744 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
745 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
747 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
748 ff_h261_encode_init(s);
749 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
750 ff_h263_encode_init(s);
751 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
752 ff_msmpeg4_encode_init(s);
753 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
754 && s->out_format == FMT_MPEG1)
755 ff_mpeg1_encode_init(s);
/* Select default (or user-supplied) quantization matrices, stored in
 * the IDCT's permuted coefficient order. */
758 for (i = 0; i < 64; i++) {
759 int j = s->dsp.idct_permutation[i];
760 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
762 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
763 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
764 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
766 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
769 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
770 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
// User-supplied matrices override the defaults chosen above.
772 if (s->avctx->intra_matrix)
773 s->intra_matrix[j] = s->avctx->intra_matrix[i];
774 if (s->avctx->inter_matrix)
775 s->inter_matrix[j] = s->avctx->inter_matrix[i];
778 /* precompute matrix */
779 /* for mjpeg, we do include qscale in the matrix */
780 if (s->out_format != FMT_MJPEG) {
781 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
782 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
784 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
785 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
789 if (ff_rate_control_init(s) < 0)
/* Tear down the encoder: release rate control, shared mpegvideo state,
 * the MJPEG sub-encoder (if used), and the extradata buffer. */
795 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
797 MpegEncContext *s = avctx->priv_data;
799 ff_rate_control_uninit(s);
801 ff_MPV_common_end(s);
802 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
803 s->out_format == FMT_MJPEG)
804 ff_mjpeg_encode_close(s);
806 av_freep(&avctx->extradata);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (used as a flatness/texture measure). */
811 static int get_sae(uint8_t *src, int ref, int stride)
816 for (y = 0; y < 16; y++) {
817 for (x = 0; x < 16; x++) {
818 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to code as intra: for each block,
 * compare inter SAD against the block's SAE around its own mean
 * (a proxy for intra cost, offset by 500). */
825 static int get_intra_count(MpegEncContext *s, uint8_t *src,
826 uint8_t *ref, int stride)
834 for (y = 0; y < h; y += 16) {
835 for (x = 0; x < w; x += 16) {
836 int offset = x + y * stride;
837 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
// mean = average pixel value of the block (pix_sum / 256, rounded).
839 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
840 int sae = get_sae(src + offset, mean, stride);
842 acc += sae + 500 < sad;
/**
 * Accept a user-supplied input frame into the encoder's reorder queue.
 * Validates/derives the pts, either references the frame directly (when
 * strides match and direct use is possible) or copies it into an internal
 * Picture, then shifts it into s->input_picture[] at the encoding delay.
 *
 * NOTE(review): several lines (direct-path decision, error returns, the
 * stride-mismatch copy loop body) are elided in this view.
 */
849 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
853 int i, display_picture_number = 0, ret;
// Frames are delayed by max_b_frames (or 1 without low_delay) so B-frame
// decisions can look ahead.
854 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
855 (s->low_delay ? 0 : 1);
860 display_picture_number = s->input_picture_number++;
/* pts handling: require monotonically increasing user pts, or guess. */
862 if (pts != AV_NOPTS_VALUE) {
863 if (s->user_specified_pts != AV_NOPTS_VALUE) {
865 int64_t last = s->user_specified_pts;
868 av_log(s->avctx, AV_LOG_ERROR,
869 "Error, Invalid timestamp=%"PRId64", "
870 "last=%"PRId64"\n", pts, s->user_specified_pts);
874 if (!s->low_delay && display_picture_number == 1)
875 s->dts_delta = time - last;
877 s->user_specified_pts = pts;
879 if (s->user_specified_pts != AV_NOPTS_VALUE) {
880 s->user_specified_pts =
881 pts = s->user_specified_pts + 1;
882 av_log(s->avctx, AV_LOG_INFO,
883 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
886 pts = display_picture_number;
/* Decide whether the input frame can be used directly. */
// NOTE(review): the trailing ';' makes this 'if' a no-op as shown; the
// controlled statement appears to be elided — verify against upstream.
892 if (!pic_arg->buf[0]);
894 if (pic_arg->linesize[0] != s->linesize)
896 if (pic_arg->linesize[1] != s->uvlinesize)
898 if (pic_arg->linesize[2] != s->uvlinesize)
901 av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
902 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* Direct path: reference the user's frame in an internal Picture. */
905 i = ff_find_unused_picture(s, 1);
909 pic = &s->picture[i];
912 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
914 if (ff_alloc_picture(s, pic, 1) < 0) {
/* Copy path: allocate an internal picture and copy the pixels in. */
918 i = ff_find_unused_picture(s, 0);
922 pic = &s->picture[i];
925 if (ff_alloc_picture(s, pic, 0) < 0) {
929 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
930 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
931 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
934 int h_chroma_shift, v_chroma_shift;
935 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
// Copy each plane, honoring the chroma subsampling shifts.
939 for (i = 0; i < 3; i++) {
940 int src_stride = pic_arg->linesize[i];
941 int dst_stride = i ? s->uvlinesize : s->linesize;
942 int h_shift = i ? h_chroma_shift : 0;
943 int v_shift = i ? v_chroma_shift : 0;
944 int w = s->width >> h_shift;
945 int h = s->height >> v_shift;
946 uint8_t *src = pic_arg->data[i];
947 uint8_t *dst = pic->f.data[i];
949 if (!s->avctx->rc_buffer_size)
950 dst += INPLACE_OFFSET;
952 if (src_stride == dst_stride)
953 memcpy(dst, src, src_stride * h);
964 copy_picture_attributes(s, &pic->f, pic_arg);
965 pic->f.display_picture_number = display_picture_number;
966 pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
969 /* shift buffer entries */
970 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
971 s->input_picture[i - 1] = s->input_picture[i];
973 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p may be skipped: accumulate an 8x8-block
 * difference score against the reference frame using the configured
 * frame_skip_cmp metric and frame_skip_exp weighting, then compare the
 * total against frame_skip_threshold / frame_skip_factor. */
978 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
984 for (plane = 0; plane < 3; plane++) {
985 const int stride = p->f.linesize[plane];
// Luma is scanned at 2x the macroblock grid (16x16 -> 8x8 blocks),
// chroma at 1x.
986 const int bw = plane ? 1 : 2;
987 for (y = 0; y < s->mb_height * bw; y++) {
988 for (x = 0; x < s->mb_width * bw; x++) {
// Non-shared pictures carry an INPLACE/edge offset of 16 bytes.
989 int off = p->shared ? 0 : 16;
990 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
991 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
992 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
// frame_skip_exp selects how block scores are combined (max, |v|,
// v^2, |v|^3, v^4).
994 switch (s->avctx->frame_skip_exp) {
995 case 0: score = FFMAX(score, v); break;
996 case 1: score += FFABS(v); break;
997 case 2: score += v * v; break;
998 case 3: score64 += FFABS(v * v * (int64_t)v); break;
999 case 4: score64 += v * v * (int64_t)(v * v); break;
1008 if (score64 < s->avctx->frame_skip_threshold)
1010 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with a secondary codec context (used by the B-frame
 * strategy estimator) and report the encoded size; the packet is freed
 * before returning. */
1015 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1017 AVPacket pkt = { 0 };
1018 int ret, got_output;
1020 av_init_packet(&pkt);
1021 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1026 av_free_packet(&pkt);
/**
 * Estimate the best number of consecutive B-frames by brute force:
 * downscale the queued input pictures by 'brd_scale', encode each
 * candidate I/P/B pattern with a scratch encoder context, and pick the
 * pattern with the lowest rate-distortion cost
 * (bits * lambda2 + reconstruction error).
 *
 * NOTE(review): the per-pattern rd bookkeeping, best_b_count selection
 * and context cleanup lines are elided in this view.
 */
1030 static int estimate_best_b_count(MpegEncContext *s)
1032 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1033 AVCodecContext *c = avcodec_alloc_context3(NULL);
1034 AVFrame input[FF_MAX_B_FRAMES + 2];
1035 const int scale = s->avctx->brd_scale;
1036 int i, j, out_size, p_lambda, b_lambda, lambda2;
1037 int64_t best_rd = INT64_MAX;
1038 int best_b_count = -1;
1040 assert(scale >= 0 && scale <= 3);
/* Use the last P/B lambdas as quality targets for the probe encodes. */
1043 //s->next_picture_ptr->quality;
1044 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1045 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1046 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1047 if (!b_lambda) // FIXME we should do this somewhere else
1048 b_lambda = p_lambda;
1049 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder at the reduced resolution. */
1052 c->width = s->width >> scale;
1053 c->height = s->height >> scale;
1054 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1055 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1056 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1057 c->mb_decision = s->avctx->mb_decision;
1058 c->me_cmp = s->avctx->me_cmp;
1059 c->mb_cmp = s->avctx->mb_cmp;
1060 c->me_sub_cmp = s->avctx->me_sub_cmp;
1061 c->pix_fmt = AV_PIX_FMT_YUV420P;
1062 c->time_base = s->avctx->time_base;
1063 c->max_b_frames = s->max_b_frames;
1065 if (avcodec_open2(c, codec, NULL) < 0)
/* Build downscaled copies of the reference + queued input pictures. */
1068 for (i = 0; i < s->max_b_frames + 2; i++) {
1069 int ysize = c->width * c->height;
1070 int csize = (c->width / 2) * (c->height / 2);
// Slot 0 is the previous reference; slots 1..N are the queued inputs.
1071 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1072 s->next_picture_ptr;
1074 avcodec_get_frame_defaults(&input[i]);
1075 input[i].data[0] = av_malloc(ysize + 2 * csize);
1076 input[i].data[1] = input[i].data[0] + ysize;
1077 input[i].data[2] = input[i].data[1] + csize;
1078 input[i].linesize[0] = c->width;
1079 input[i].linesize[1] =
1080 input[i].linesize[2] = c->width / 2;
1082 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1083 pre_input = *pre_input_ptr;
// Internal (non-shared) pictures carry the INPLACE offset.
1085 if (!pre_input.shared && i) {
1086 pre_input.f.data[0] += INPLACE_OFFSET;
1087 pre_input.f.data[1] += INPLACE_OFFSET;
1088 pre_input.f.data[2] += INPLACE_OFFSET;
1091 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1092 pre_input.f.data[0], pre_input.f.linesize[0],
1093 c->width, c->height);
1094 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1095 pre_input.f.data[1], pre_input.f.linesize[1],
1096 c->width >> 1, c->height >> 1);
1097 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1098 pre_input.f.data[2], pre_input.f.linesize[2],
1099 c->width >> 1, c->height >> 1);
/* Try each B-frame run length j and measure its rd cost. */
1103 for (j = 0; j < s->max_b_frames + 1; j++) {
1106 if (!s->input_picture[j])
1109 c->error[0] = c->error[1] = c->error[2] = 0;
// Frame 0 is encoded as a fixed-quality I-frame to seed the probe.
1111 input[0].pict_type = AV_PICTURE_TYPE_I;
1112 input[0].quality = 1 * FF_QP2LAMBDA;
1114 out_size = encode_frame(c, &input[0]);
1116 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1118 for (i = 0; i < s->max_b_frames + 1; i++) {
// Every (j+1)-th frame (and the last) is a P-frame; the rest are Bs.
1119 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1121 input[i + 1].pict_type = is_p ?
1122 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1123 input[i + 1].quality = is_p ? p_lambda : b_lambda;
1125 out_size = encode_frame(c, &input[i + 1]);
1127 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1130 /* get the delayed frames */
1132 out_size = encode_frame(c, NULL);
1133 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
// Add the accumulated PSNR reconstruction error to the rate term.
1136 rd += c->error[0] + c->error[1] + c->error[2];
1147 for (i = 0; i < s->max_b_frames + 2; i++) {
1148 av_freep(&input[i].data[0]);
1151 return best_b_count;
1154 static int select_input_picture(MpegEncContext *s)
1158 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1159 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1160 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1162 /* set next picture type & ordering */
1163 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1164 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1165 s->next_picture_ptr == NULL || s->intra_only) {
1166 s->reordered_input_picture[0] = s->input_picture[0];
1167 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1168 s->reordered_input_picture[0]->f.coded_picture_number =
1169 s->coded_picture_number++;
1173 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1174 if (s->picture_in_gop_number < s->gop_size &&
1175 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1176 // FIXME check that te gop check above is +-1 correct
1177 av_frame_unref(&s->input_picture[0]->f);
1180 ff_vbv_update(s, 0);
1186 if (s->flags & CODEC_FLAG_PASS2) {
1187 for (i = 0; i < s->max_b_frames + 1; i++) {
1188 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1190 if (pict_num >= s->rc_context.num_entries)
1192 if (!s->input_picture[i]) {
1193 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1197 s->input_picture[i]->f.pict_type =
1198 s->rc_context.entry[pict_num].new_pict_type;
1202 if (s->avctx->b_frame_strategy == 0) {
1203 b_frames = s->max_b_frames;
1204 while (b_frames && !s->input_picture[b_frames])
1206 } else if (s->avctx->b_frame_strategy == 1) {
1207 for (i = 1; i < s->max_b_frames + 1; i++) {
1208 if (s->input_picture[i] &&
1209 s->input_picture[i]->b_frame_score == 0) {
1210 s->input_picture[i]->b_frame_score =
1212 s->input_picture[i ]->f.data[0],
1213 s->input_picture[i - 1]->f.data[0],
1217 for (i = 0; i < s->max_b_frames + 1; i++) {
1218 if (s->input_picture[i] == NULL ||
1219 s->input_picture[i]->b_frame_score - 1 >
1220 s->mb_num / s->avctx->b_sensitivity)
1224 b_frames = FFMAX(0, i - 1);
1227 for (i = 0; i < b_frames + 1; i++) {
1228 s->input_picture[i]->b_frame_score = 0;
1230 } else if (s->avctx->b_frame_strategy == 2) {
1231 b_frames = estimate_best_b_count(s);
1233 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1239 for (i = b_frames - 1; i >= 0; i--) {
1240 int type = s->input_picture[i]->f.pict_type;
1241 if (type && type != AV_PICTURE_TYPE_B)
1244 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1245 b_frames == s->max_b_frames) {
1246 av_log(s->avctx, AV_LOG_ERROR,
1247 "warning, too many b frames in a row\n");
1250 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1251 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1252 s->gop_size > s->picture_in_gop_number) {
1253 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1255 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1257 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1261 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1262 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1265 s->reordered_input_picture[0] = s->input_picture[b_frames];
1266 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1267 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1268 s->reordered_input_picture[0]->f.coded_picture_number =
1269 s->coded_picture_number++;
1270 for (i = 0; i < b_frames; i++) {
1271 s->reordered_input_picture[i + 1] = s->input_picture[i];
1272 s->reordered_input_picture[i + 1]->f.pict_type =
1274 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1275 s->coded_picture_number++;
1280 if (s->reordered_input_picture[0]) {
1281 s->reordered_input_picture[0]->reference =
1282 s->reordered_input_picture[0]->f.pict_type !=
1283 AV_PICTURE_TYPE_B ? 3 : 0;
1285 ff_mpeg_unref_picture(s, &s->new_picture);
1286 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1289 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1290 // input is a shared pix, so we can't modifiy it -> alloc a new
1291 // one & ensure that the shared one is reuseable
1294 int i = ff_find_unused_picture(s, 0);
1297 pic = &s->picture[i];
1299 pic->reference = s->reordered_input_picture[0]->reference;
1300 if (ff_alloc_picture(s, pic, 0) < 0) {
1304 copy_picture_attributes(s, &pic->f,
1305 &s->reordered_input_picture[0]->f);
1307 /* mark us unused / free shared pic */
1308 av_frame_unref(&s->reordered_input_picture[0]->f);
1309 s->reordered_input_picture[0]->shared = 0;
1311 s->current_picture_ptr = pic;
1313 // input is not a shared pix -> reuse buffer for current_pix
1314 s->current_picture_ptr = s->reordered_input_picture[0];
1315 for (i = 0; i < 4; i++) {
1316 s->new_picture.f.data[i] += INPLACE_OFFSET;
1319 ff_mpeg_unref_picture(s, &s->current_picture);
1320 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1321 s->current_picture_ptr)) < 0)
1324 s->picture_number = s->new_picture.f.display_picture_number;
1326 ff_mpeg_unref_picture(s, &s->new_picture);
/*
 * Encode one video frame into an AVPacket.
 *
 * Queues the raw input picture, selects the next picture to code
 * (B-frame reordering is done by select_input_picture()), runs the
 * per-slice encoder threads, applies rate control / VBV handling and
 * fills in packet pts/dts and flags.
 *
 * @param avctx      codec context (priv_data is the MpegEncContext)
 * @param pkt        output packet; data/size set on success
 * @param pic_arg    input frame, or NULL to flush delayed frames
 * @param got_packet set to 1 when a packet was produced
 * @return 0 on success, negative error code otherwise
 *
 * NOTE(review): this extract is elided (several original lines are
 * missing), so error paths and some braces are not visible here.
 */
1331 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1332 const AVFrame *pic_arg, int *got_packet)
1334 MpegEncContext *s = avctx->priv_data;
1335 int i, stuffing_count, ret;
1336 int context_count = s->slice_context_count;
1338 s->picture_in_gop_number++;
/* Queue the input frame and choose the picture to code next. */
1340 if (load_input_picture(s, pic_arg) < 0)
1343 if (select_input_picture(s) < 0) {
/* A picture was selected for coding -> allocate the packet and encode. */
1348 if (s->new_picture.f.data[0]) {
1350 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* H.263 MB info side data: 12 bytes per macroblock (see write_mb_info()). */
1353 s->mb_info_ptr = av_packet_new_side_data(pkt,
1354 AV_PKT_DATA_H263_MB_INFO,
1355 s->mb_width*s->mb_height*12);
1356 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Split the packet buffer between slice-encoder threads,
 * proportionally to the macroblock rows each thread covers. */
1359 for (i = 0; i < context_count; i++) {
1360 int start_y = s->thread_context[i]->start_mb_y;
1361 int end_y = s->thread_context[i]-> end_mb_y;
1362 int h = s->mb_height;
1363 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1364 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1366 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1369 s->pict_type = s->new_picture.f.pict_type;
1371 ff_MPV_frame_start(s, avctx);
1373 if (encode_picture(s, s->picture_number) < 0)
/* Export per-frame bit statistics to the codec context. */
1376 avctx->header_bits = s->header_bits;
1377 avctx->mv_bits = s->mv_bits;
1378 avctx->misc_bits = s->misc_bits;
1379 avctx->i_tex_bits = s->i_tex_bits;
1380 avctx->p_tex_bits = s->p_tex_bits;
1381 avctx->i_count = s->i_count;
1382 // FIXME f/b_count in avctx
1383 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1384 avctx->skip_count = s->skip_count;
1386 ff_MPV_frame_end(s);
1388 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1389 ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow check: if the frame is too large, raise lambda and
 * (in the elided path) re-encode the frame with the new settings. */
1391 if (avctx->rc_buffer_size) {
1392 RateControlContext *rcc = &s->rc_context;
1393 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1395 if (put_bits_count(&s->pb) > max_size &&
1396 s->lambda < s->avctx->lmax) {
1397 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1398 (s->qscale + 1) / s->qscale);
1399 if (s->adaptive_quant) {
1401 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1402 s->lambda_table[i] =
1403 FFMAX(s->lambda_table[i] + 1,
1404 s->lambda_table[i] * (s->qscale + 1) /
/* Undo per-frame state changes before the retry. */
1407 s->mb_skipped = 0; // done in MPV_frame_start()
1408 // done in encode_picture() so we must undo it
1409 if (s->pict_type == AV_PICTURE_TYPE_P) {
1410 if (s->flipflop_rounding ||
1411 s->codec_id == AV_CODEC_ID_H263P ||
1412 s->codec_id == AV_CODEC_ID_MPEG4)
1413 s->no_rounding ^= 1;
1415 if (s->pict_type != AV_PICTURE_TYPE_B) {
1416 s->time_base = s->last_time_base;
1417 s->last_non_b_time = s->time - s->pp_time;
/* Rewind every slice thread's bit writer to the start of its buffer. */
1419 for (i = 0; i < context_count; i++) {
1420 PutBitContext *pb = &s->thread_context[i]->pb;
1421 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1426 assert(s->avctx->rc_max_rate);
1429 if (s->flags & CODEC_FLAG_PASS1)
1430 ff_write_pass1_stats(s);
/* Accumulate per-plane error (PSNR) statistics. */
1432 for (i = 0; i < 4; i++) {
1433 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1434 avctx->error[i] += s->current_picture_ptr->f.error[i];
1437 if (s->flags & CODEC_FLAG_PASS1)
1438 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1439 avctx->i_tex_bits + avctx->p_tex_bits ==
1440 put_bits_count(&s->pb));
1441 flush_put_bits(&s->pb);
1442 s->frame_bits = put_bits_count(&s->pb);
/* VBV stuffing: pad the frame with codec-specific filler bytes
 * when rate control asks for it. */
1444 stuffing_count = ff_vbv_update(s, s->frame_bits);
1445 if (stuffing_count) {
1446 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1447 stuffing_count + 50) {
1448 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1452 switch (s->codec_id) {
1453 case AV_CODEC_ID_MPEG1VIDEO:
1454 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero stuffing bytes. */
1455 while (stuffing_count--) {
1456 put_bits(&s->pb, 8, 0);
1459 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing start code 0x1C3 followed by 0xFF bytes. */
1460 put_bits(&s->pb, 16, 0);
1461 put_bits(&s->pb, 16, 0x1C3);
1462 stuffing_count -= 4;
1463 while (stuffing_count--) {
1464 put_bits(&s->pb, 8, 0xFF);
1468 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1470 flush_put_bits(&s->pb);
1471 s->frame_bits = put_bits_count(&s->pb);
1474 /* update mpeg1/2 vbv_delay for CBR */
1475 if (s->avctx->rc_max_rate &&
1476 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1477 s->out_format == FMT_MPEG1 &&
1478 90000LL * (avctx->rc_buffer_size - 1) <=
1479 s->avctx->rc_max_rate * 0xFFFFLL) {
1480 int vbv_delay, min_delay;
1481 double inbits = s->avctx->rc_max_rate *
1482 av_q2d(s->avctx->time_base);
1483 int minbits = s->frame_bits - 8 *
1484 (s->vbv_delay_ptr - s->pb.buf - 1);
1485 double bits = s->rc_context.buffer_index + minbits - inbits;
1488 av_log(s->avctx, AV_LOG_ERROR,
1489 "Internal error, negative bits\n");
1491 assert(s->repeat_first_field == 0);
/* vbv_delay is in 90 kHz clock ticks (MPEG system clock / 300). */
1493 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1494 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1495 s->avctx->rc_max_rate;
1497 vbv_delay = FFMAX(vbv_delay, min_delay);
1499 assert(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in the already written
 * picture header (it straddles three bytes). */
1501 s->vbv_delay_ptr[0] &= 0xF8;
1502 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1503 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1504 s->vbv_delay_ptr[2] &= 0x07;
1505 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1506 avctx->vbv_delay = vbv_delay * 300;
1508 s->total_bits += s->frame_bits;
1509 avctx->frame_bits = s->frame_bits;
/* Timestamps: dts lags pts by one coded frame when B-frames delay output. */
1511 pkt->pts = s->current_picture.f.pts;
1512 if (!s->low_delay) {
1513 if (!s->current_picture.f.coded_picture_number)
1514 pkt->dts = pkt->pts - s->dts_delta;
1516 pkt->dts = s->reordered_pts;
1517 s->reordered_pts = s->input_picture[0]->f.pts;
1519 pkt->dts = pkt->pts;
1520 if (s->current_picture.f.key_frame)
1521 pkt->flags |= AV_PKT_FLAG_KEY;
1523 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1527 assert((s->frame_bits & 7) == 0);
1529 pkt->size = s->frame_bits / 8;
1530 *got_packet = !!pkt->size;
/*
 * Small-coefficient elimination for block n of the current macroblock.
 *
 * Scans the quantized block in (permuted) scan order and accumulates a
 * score from tab[] for small-magnitude coefficients; if the total stays
 * below |threshold| the whole block (minus, optionally, the DC term) is
 * zeroed and block_last_index[n] updated accordingly.
 *
 * @param n         block index within s->block[]
 * @param threshold elimination threshold; a negative value requests
 *                  DC-preserving elimination (skip_dc), then |threshold|
 *                  is used — see the sign flip below
 */
1534 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1535 int n, int threshold)
/* Score table indexed by scan position: early (low-frequency)
 * coefficients cost more, positions >= 24 cost nothing. */
1537 static const char tab[64] = {
1538 3, 2, 2, 1, 1, 1, 1, 1,
1539 1, 1, 1, 1, 1, 1, 1, 1,
1540 1, 1, 1, 1, 1, 1, 1, 1,
1541 0, 0, 0, 0, 0, 0, 0, 0,
1542 0, 0, 0, 0, 0, 0, 0, 0,
1543 0, 0, 0, 0, 0, 0, 0, 0,
1544 0, 0, 0, 0, 0, 0, 0, 0,
1545 0, 0, 0, 0, 0, 0, 0, 0
1550 int16_t *block = s->block[n];
1551 const int last_index = s->block_last_index[n];
/* Negative threshold means: keep DC, use the absolute value. */
1554 if (threshold < 0) {
1556 threshold = -threshold;
1560 /* Are all we could set to zero already zero? */
1561 if (last_index <= skip_dc - 1)
/* Accumulate the elimination score over all coded coefficients. */
1564 for (i = 0; i <= last_index; i++) {
1565 const int j = s->intra_scantable.permutated[i];
1566 const int level = FFABS(block[j]);
1568 if (skip_dc && i == 0)
1572 } else if (level > 1) {
/* Block is significant enough — keep it untouched. */
1578 if (score >= threshold)
/* Otherwise zero every eliminable coefficient ... */
1580 for (i = skip_dc; i <= last_index; i++) {
1581 const int j = s->intra_scantable.permutated[i];
/* ... and shrink last_index: 0 if the DC survives, -1 if empty. */
1585 s->block_last_index[n] = 0;
1587 s->block_last_index[n] = -1;
/*
 * Clamp quantized coefficients to the codec's representable range
 * [s->min_qcoeff, s->max_qcoeff], counting how many were clipped.
 * The intra DC coefficient (scan position 0) is never clipped.
 * A diagnostic is logged only in simple MB decision mode, since the
 * RD modes are expected to avoid the overflow in the first place.
 */
1590 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1594 const int maxlevel = s->max_qcoeff;
1595 const int minlevel = s->min_qcoeff;
1599 i = 1; // skip clipping of intra dc
/* Walk the coded coefficients in permuted scan order and clamp. */
1603 for (; i <= last_index; i++) {
1604 const int j = s->intra_scantable.permutated[i];
1605 int level = block[j];
1607 if (level > maxlevel) {
1610 } else if (level < minlevel) {
1618 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1619 av_log(s->avctx, AV_LOG_INFO,
1620 "warning, clipping %d dct coefficients to %d..%d\n",
1621 overflow, minlevel, maxlevel);
/*
 * Compute a perceptual weight for each pixel of an 8x8 block, used by
 * the noise-shaping quantizer (dct_quantize_refine). For every pixel,
 * the local variance over its (clamped) 3x3 neighbourhood is estimated
 * and turned into a weight: flat areas (low variance) get low weights,
 * i.e. quantization noise there is considered more visible.
 *
 * @param weight out: 64 weights, row-major (weight[x + 8*y])
 * @param ptr    source pixels, top-left of the 8x8 block
 * @param stride line size of the source in bytes
 */
1624 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1628 for (y = 0; y < 8; y++) {
1629 for (x = 0; x < 8; x++) {
/* 3x3 neighbourhood, clamped to the block borders. */
1635 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1636 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1637 int v = ptr[x2 + y2 * stride];
/* sqrt(count*sqr - sum*sum) ~ count * stddev of the window. */
1643 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/*
 * Encode one macroblock: fetch (intra) or motion-compensate and
 * difference (inter) the pixel data, optionally pick field DCT for
 * interlaced content, forward-DCT and quantize all blocks, run the
 * optional noise shaping / coefficient elimination passes, and finally
 * dispatch to the codec-specific entropy coder.
 *
 * @param motion_x/motion_y  motion vector used by the entropy coders
 * @param mb_block_height    chroma block height (8 for 4:2:0, 16 for 4:2:2)
 * (mb_block_count, visible in the elided parameter list, is 6 or 8)
 *
 * NOTE(review): this extract is elided; several conditions, braces and
 * statements are missing between the visible lines.
 */
1648 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1649 int motion_x, int motion_y,
1650 int mb_block_height,
1653 int16_t weight[8][64];
1654 int16_t orig[8][64];
1655 const int mb_x = s->mb_x;
1656 const int mb_y = s->mb_y;
1659 int dct_offset = s->linesize * 8; // default for progressive frames
1660 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1663 for (i = 0; i < mb_block_count; i++)
1664 skip_dct[i] = s->skipdct;
/* Adaptive quantization: pick this MB's lambda/qscale from the
 * per-MB tables filled in by rate control. */
1666 if (s->adaptive_quant) {
1667 const int last_qp = s->qscale;
1668 const int mb_xy = mb_x + mb_y * s->mb_stride;
1670 s->lambda = s->lambda_table[mb_xy];
1673 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1674 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1675 s->dquant = s->qscale - last_qp;
/* H.263-family restrictions on how much qscale may change per MB. */
1677 if (s->out_format == FMT_H263) {
1678 s->dquant = av_clip(s->dquant, -2, 2);
1680 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1682 if (s->pict_type == AV_PICTURE_TYPE_B) {
1683 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1686 if (s->mv_type == MV_TYPE_8X8)
1692 ff_set_qscale(s, last_qp + s->dquant);
1693 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1694 ff_set_qscale(s, s->qscale + s->dquant);
/* Source pointers for this MB in the picture being coded. */
1696 wrap_y = s->linesize;
1697 wrap_c = s->uvlinesize;
1698 ptr_y = s->new_picture.f.data[0] +
1699 (mb_y * 16 * wrap_y) + mb_x * 16;
1700 ptr_cb = s->new_picture.f.data[1] +
1701 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1702 ptr_cr = s->new_picture.f.data[2] +
1703 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB sticks out of the picture -> read through the edge emulation
 * buffer so out-of-frame pixels are replicated, not read OOB. */
1705 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1706 uint8_t *ebuf = s->edge_emu_buffer + 32;
1707 s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1708 mb_y * 16, s->width, s->height);
1710 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1711 mb_block_height, mb_x * 8, mb_y * 8,
1712 s->width >> 1, s->height >> 1);
1713 ptr_cb = ebuf + 18 * wrap_y;
1714 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1715 mb_block_height, mb_x * 8, mb_y * 8,
1716 s->width >> 1, s->height >> 1);
1717 ptr_cr = ebuf + 18 * wrap_y + 8;
/* Intra path: decide frame vs field DCT by comparing interlace costs
 * (ildct_cmp[4] = the cmp function chosen for intra). */
1721 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1722 int progressive_score, interlaced_score;
1724 s->interlaced_dct = 0;
1725 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1727 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1728 NULL, wrap_y, 8) - 400;
1730 if (progressive_score > 0) {
1731 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1732 NULL, wrap_y * 2, 8) +
1733 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1734 NULL, wrap_y * 2, 8);
1735 if (progressive_score > interlaced_score) {
1736 s->interlaced_dct = 1;
/* Field DCT: bottom-field blocks start one line down. */
1738 dct_offset = wrap_y;
1740 if (s->chroma_format == CHROMA_422)
/* Copy pixels straight into the four luma DCT blocks. */
1746 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1747 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1748 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1749 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1751 if (s->flags & CODEC_FLAG_GRAY) {
1755 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1756 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1757 if (!s->chroma_y_shift) { /* 422 */
1758 s->dsp.get_pixels(s->block[6],
1759 ptr_cb + (dct_offset >> 1), wrap_c);
1760 s->dsp.get_pixels(s->block[7],
1761 ptr_cr + (dct_offset >> 1), wrap_c);
/* Inter path: motion compensate into s->dest[], then code the residual. */
1765 op_pixels_func (*op_pix)[4];
1766 qpel_mc_func (*op_qpix)[16];
1767 uint8_t *dest_y, *dest_cb, *dest_cr;
1769 dest_y = s->dest[0];
1770 dest_cb = s->dest[1];
1771 dest_cr = s->dest[2];
/* Rounding selection mirrors what the decoder will do. */
1773 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1774 op_pix = s->hdsp.put_pixels_tab;
1775 op_qpix = s->dsp.put_qpel_pixels_tab;
1777 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1778 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
/* Forward prediction first; backward prediction averages on top
 * (bidirectional MBs use put then avg). */
1781 if (s->mv_dir & MV_DIR_FORWARD) {
1782 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1783 s->last_picture.f.data,
1785 op_pix = s->hdsp.avg_pixels_tab;
1786 op_qpix = s->dsp.avg_qpel_pixels_tab;
1788 if (s->mv_dir & MV_DIR_BACKWARD) {
1789 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1790 s->next_picture.f.data,
/* Inter frame/field DCT decision on the residual
 * (ildct_cmp[0] = the cmp function chosen for inter). */
1794 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1795 int progressive_score, interlaced_score;
1797 s->interlaced_dct = 0;
1798 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1801 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1802 ptr_y + wrap_y * 8, wrap_y,
1805 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1806 progressive_score -= 400;
1808 if (progressive_score > 0) {
1809 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1812 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1816 if (progressive_score > interlaced_score) {
1817 s->interlaced_dct = 1;
1819 dct_offset = wrap_y;
1821 if (s->chroma_format == CHROMA_422)
/* Residual = source - prediction, per 8x8 block. */
1827 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1828 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1829 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1830 dest_y + dct_offset, wrap_y);
1831 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1832 dest_y + dct_offset + 8, wrap_y);
1834 if (s->flags & CODEC_FLAG_GRAY) {
1838 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1839 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1840 if (!s->chroma_y_shift) { /* 422 */
1841 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1842 dest_cb + (dct_offset >> 1), wrap_c);
1843 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1844 dest_cr + (dct_offset >> 1), wrap_c);
1847 /* pre quantization */
/* Cheap skip heuristic: in low-variance MBs, blocks whose SAD against
 * the prediction is below 20*qscale are marked to skip the DCT. */
1848 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1849 2 * s->qscale * s->qscale) {
1851 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1852 wrap_y, 8) < 20 * s->qscale)
1854 if (s->dsp.sad[1](NULL, ptr_y + 8,
1855 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1857 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1858 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1860 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1861 dest_y + dct_offset + 8,
1862 wrap_y, 8) < 20 * s->qscale)
1864 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1865 wrap_c, 8) < 20 * s->qscale)
1867 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1868 wrap_c, 8) < 20 * s->qscale)
1870 if (!s->chroma_y_shift) { /* 422 */
1871 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1872 dest_cb + (dct_offset >> 1),
1873 wrap_c, 8) < 20 * s->qscale)
1875 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1876 dest_cr + (dct_offset >> 1),
1877 wrap_c, 8) < 20 * s->qscale)
/* Noise shaping: compute perceptual weights and keep the original
 * residual around for dct_quantize_refine() below. */
1883 if (s->quantizer_noise_shaping) {
1885 get_visual_weight(weight[0], ptr_y , wrap_y);
1887 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1889 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1891 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1893 get_visual_weight(weight[4], ptr_cb , wrap_c);
1895 get_visual_weight(weight[5], ptr_cr , wrap_c);
1896 if (!s->chroma_y_shift) { /* 422 */
1898 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1901 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1904 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1907 /* DCT & quantize */
1908 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1910 for (i = 0; i < mb_block_count; i++) {
1913 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1914 // FIXME we could decide to change to quantizer instead of
1916 // JS: I don't think that would be a good idea it could lower
1917 // quality instead of improve it. Just INTRADC clipping
1918 // deserves changes in quantizer
1920 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1922 s->block_last_index[i] = -1;
1924 if (s->quantizer_noise_shaping) {
1925 for (i = 0; i < mb_block_count; i++) {
1927 s->block_last_index[i] =
1928 dct_quantize_refine(s, s->block[i], weight[i],
1929 orig[i], i, s->qscale);
/* Drop near-empty inter blocks (luma blocks 0-3, chroma 4+). */
1934 if (s->luma_elim_threshold && !s->mb_intra)
1935 for (i = 0; i < 4; i++)
1936 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1937 if (s->chroma_elim_threshold && !s->mb_intra)
1938 for (i = 4; i < mb_block_count; i++)
1939 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1941 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1942 for (i = 0; i < mb_block_count; i++) {
1943 if (s->block_last_index[i] == -1)
1944 s->coded_score[i] = INT_MAX / 256;
/* Gray (luma-only) intra coding: force neutral chroma DC. */
1949 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1950 s->block_last_index[4] =
1951 s->block_last_index[5] = 0;
1953 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1956 // non c quantize code returns incorrect block_last_index FIXME
/* With alternate scan, recompute last_index by scanning backwards
 * for the last nonzero coefficient. */
1957 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1958 for (i = 0; i < mb_block_count; i++) {
1960 if (s->block_last_index[i] > 0) {
1961 for (j = 63; j > 0; j--) {
1962 if (s->block[i][s->intra_scantable.permutated[j]])
1965 s->block_last_index[i] = j;
1970 /* huffman encode */
/* Dispatch to the codec-specific macroblock entropy coder. */
1971 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1972 case AV_CODEC_ID_MPEG1VIDEO:
1973 case AV_CODEC_ID_MPEG2VIDEO:
1974 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1975 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1977 case AV_CODEC_ID_MPEG4:
1978 if (CONFIG_MPEG4_ENCODER)
1979 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1981 case AV_CODEC_ID_MSMPEG4V2:
1982 case AV_CODEC_ID_MSMPEG4V3:
1983 case AV_CODEC_ID_WMV1:
1984 if (CONFIG_MSMPEG4_ENCODER)
1985 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1987 case AV_CODEC_ID_WMV2:
1988 if (CONFIG_WMV2_ENCODER)
1989 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1991 case AV_CODEC_ID_H261:
1992 if (CONFIG_H261_ENCODER)
1993 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1995 case AV_CODEC_ID_H263:
1996 case AV_CODEC_ID_H263P:
1997 case AV_CODEC_ID_FLV1:
1998 case AV_CODEC_ID_RV10:
1999 case AV_CODEC_ID_RV20:
2000 if (CONFIG_H263_ENCODER)
2001 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2003 case AV_CODEC_ID_MJPEG:
2004 if (CONFIG_MJPEG_ENCODER)
2005 ff_mjpeg_encode_mb(s, s->block);
/*
 * Thin wrapper around encode_mb_internal(): selects the chroma block
 * height / block count for the current chroma format
 * (4:2:0 -> 8-high chroma, 6 blocks; otherwise 16-high, 8 blocks).
 */
2012 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2014 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2015 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/*
 * Snapshot the encoder state that a macroblock-coding attempt may
 * modify, from s into d, so the attempt can later be rolled back or
 * committed (see encode_mb_hq()). Copies MV history, DC predictors,
 * bit-count statistics and quantizer state.
 */
2018 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2021 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2024 d->mb_skip_run= s->mb_skip_run;
2026 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2029 d->mv_bits= s->mv_bits;
2030 d->i_tex_bits= s->i_tex_bits;
2031 d->p_tex_bits= s->p_tex_bits;
2032 d->i_count= s->i_count;
2033 d->f_count= s->f_count;
2034 d->b_count= s->b_count;
2035 d->skip_count= s->skip_count;
2036 d->misc_bits= s->misc_bits;
/* quantizer state */
2040 d->qscale= s->qscale;
2041 d->dquant= s->dquant;
2043 d->esc3_level_length= s->esc3_level_length;
/*
 * Commit the state of a finished macroblock-coding attempt from s into
 * d. Superset of copy_context_before_encode(): additionally copies the
 * chosen MVs, MB mode flags, partitioned-frame bit writers and the
 * per-block last-index array.
 */
2046 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2049 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2050 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2053 d->mb_skip_run= s->mb_skip_run;
2055 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2058 d->mv_bits= s->mv_bits;
2059 d->i_tex_bits= s->i_tex_bits;
2060 d->p_tex_bits= s->p_tex_bits;
2061 d->i_count= s->i_count;
2062 d->f_count= s->f_count;
2063 d->b_count= s->b_count;
2064 d->skip_count= s->skip_count;
2065 d->misc_bits= s->misc_bits;
/* chosen macroblock mode */
2067 d->mb_intra= s->mb_intra;
2068 d->mb_skipped= s->mb_skipped;
2069 d->mv_type= s->mv_type;
2070 d->mv_dir= s->mv_dir;
/* data-partitioned MPEG-4 keeps separate bit writers */
2072 if(s->data_partitioning){
2074 d->tex_pb= s->tex_pb;
2078 d->block_last_index[i]= s->block_last_index[i];
2079 d->interlaced_dct= s->interlaced_dct;
2080 d->qscale= s->qscale;
2082 d->esc3_level_length= s->esc3_level_length;
/*
 * Trial-encode one macroblock with a candidate mode (type) and keep it
 * if it beats the current best rate(-distortion) score.
 *
 * Restores state from *backup, encodes into the scratch bit buffer
 * selected by *next_block, scores the result (bits, or
 * lambda2*bits + SSE in full RD mode), and on improvement commits the
 * state into *best and updates *dmin. Used by the mode-decision loop
 * in encode_thread().
 */
2085 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2086 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2087 int *dmin, int *next_block, int motion_x, int motion_y)
2090 uint8_t *dest_backup[3];
/* Roll back to the pre-attempt state. */
2092 copy_context_before_encode(s, backup, type);
/* Double-buffered scratch blocks/bit writers: *next_block selects
 * which half this attempt writes into. */
2094 s->block= s->blocks[*next_block];
2095 s->pb= pb[*next_block];
2096 if(s->data_partitioning){
2097 s->pb2 = pb2 [*next_block];
2098 s->tex_pb= tex_pb[*next_block];
/* RD mode reconstructs into a scratchpad instead of the real frame. */
2102 memcpy(dest_backup, s->dest, sizeof(s->dest));
2103 s->dest[0] = s->rd_scratchpad;
2104 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2105 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2106 assert(s->linesize >= 32); //FIXME
2109 encode_mb(s, motion_x, motion_y);
/* Score: bit count, summed over all partitions if partitioned. */
2111 score= put_bits_count(&s->pb);
2112 if(s->data_partitioning){
2113 score+= put_bits_count(&s->pb2);
2114 score+= put_bits_count(&s->tex_pb);
/* Full RD: decode the MB back and add the distortion term. */
2117 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2118 ff_MPV_decode_mb(s, s->block);
2120 score *= s->lambda2;
2121 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2125 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* New best mode: commit its state. */
2132 copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two w x h pixel regions.
 * Uses the optimized dsp sse functions for the common 16x16 and 8x8
 * cases; otherwise falls back to a scalar loop using the shifted
 * square lookup table (sq[d] == d*d for d in [-255,255]).
 */
2136 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2137 uint32_t *sq = ff_squareTbl + 256;
2142 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2143 else if(w==8 && h==8)
2144 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2148 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * Distortion (SSE, or NSSE if mb_cmp selects it) of the current
 * macroblock: reconstructed pixels in s->dest[] vs the source picture.
 * w/h are clipped at the right/bottom picture border; the partial-MB
 * case goes through the generic sse() helper.
 */
2157 static int sse_mb(MpegEncContext *s){
2161 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2162 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* Full 16x16 MB: use the optimized (n)sse dsp functions. */
2165 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2166 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2167 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2168 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2170 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2171 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2172 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* Border MB: clipped sizes, generic path (chroma at half resolution). */
2175 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2176 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2177 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Slice-thread worker: coarse pre-pass motion estimation for P-frames.
 * Iterates the thread's MB rows bottom-up / right-to-left (the pre-pass
 * uses the opposite scan direction from the main pass) with the
 * pre_dia_size search diameter.
 */
2180 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2181 MpegEncContext *s= *(void**)arg;
2185 s->me.dia_size= s->avctx->pre_dia_size;
2186 s->first_slice_line=1;
2187 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2188 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2189 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2191 s->first_slice_line=0;
/*
 * Slice-thread worker: full motion estimation over the thread's MB
 * rows. Chooses B- or P-frame estimation per picture type and stores
 * the resulting vectors and MB types in the context tables.
 */
2199 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2200 MpegEncContext *s= *(void**)arg;
2202 ff_check_alignment();
2204 s->me.dia_size= s->avctx->dia_size;
2205 s->first_slice_line=1;
2206 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2207 s->mb_x=0; //for block init below
2208 ff_init_block_index(s);
2209 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the per-MB block indices by one MB (2 blocks each). */
2210 s->block_index[0]+=2;
2211 s->block_index[1]+=2;
2212 s->block_index[2]+=2;
2213 s->block_index[3]+=2;
2215 /* compute motion vector & mb_type and store in context */
2216 if(s->pict_type==AV_PICTURE_TYPE_B)
2217 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2219 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2221 s->first_slice_line=0;
/*
 * Slice-thread worker: per-macroblock luma variance and mean for the
 * thread's rows, used by rate control / adaptive quantization.
 * varc ~ (sum of squares - mean^2) per 16x16 luma block, rounded.
 */
2226 static int mb_var_thread(AVCodecContext *c, void *arg){
2227 MpegEncContext *s= *(void**)arg;
2230 ff_check_alignment();
2232 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2233 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2236 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2238 int sum = s->dsp.pix_sum(pix, s->linesize);
/* variance: E[x^2] - E[x]^2, scaled/rounded for the 256-pixel block */
2240 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2242 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2243 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2244 s->me.mb_var_sum_temp += varc;
/*
 * Finalize the current slice's bitstream: merge MPEG-4 data partitions
 * and write stuffing, or MJPEG stuffing, then byte-align and flush the
 * bit writer. In pass-1 rate control the alignment bits are accounted
 * as misc_bits.
 */
2250 static void write_slice_end(MpegEncContext *s){
2251 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2252 if(s->partitioned_frame){
2253 ff_mpeg4_merge_partitions(s);
2256 ff_mpeg4_stuffing(&s->pb);
2257 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2258 ff_mjpeg_encode_stuffing(&s->pb);
2261 avpriv_align_put_bits(&s->pb);
2262 flush_put_bits(&s->pb);
2264 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2265 s->misc_bits+= get_bits_diff(s);
/*
 * Fill one 12-byte AV_PKT_DATA_H263_MB_INFO side-data record for the
 * current macroblock: bit offset in the packet, qscale, GOB number,
 * MB address within the GOB, and the H.263 motion vector predictors
 * (4MV predictors are left as 0).
 */
2268 static void write_mb_info(MpegEncContext *s)
2270 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2271 int offset = put_bits_count(&s->pb);
2272 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2273 int gobn = s->mb_y / s->gob_index;
2275 if (CONFIG_H263_ENCODER)
2276 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2277 bytestream_put_le32(&ptr, offset);
2278 bytestream_put_byte(&ptr, s->qscale);
2279 bytestream_put_byte(&ptr, gobn);
2280 bytestream_put_le16(&ptr, mba);
2281 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2282 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2283 /* 4MV not implemented */
2284 bytestream_put_byte(&ptr, 0); /* hmv2 */
2285 bytestream_put_byte(&ptr, 0); /* vmv2 */
/*
 * Maintain the H.263 MB-info side data while encoding.
 *
 * Called per macroblock (startcode=0) to reserve a new 12-byte record
 * each time the stream has advanced by at least s->mb_info bytes since
 * the last record, and at slice/GOB start codes (startcode=1) to note
 * the position just after the start code.
 */
2288 static void update_mb_info(MpegEncContext *s, int startcode)
2292 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2293 s->mb_info_size += 12;
2294 s->prev_mb_info = s->last_mb_info;
2297 s->prev_mb_info = put_bits_count(&s->pb)/8;
2298 /* This might have incremented mb_info_size above, and we return without
2299 * actually writing any info into that slot yet. But in that case,
2300 * this will be called again after writing the start code, and then
2301 * the mb info is actually written. */
2305 s->last_mb_info = put_bits_count(&s->pb)/8;
2306 if (!s->mb_info_size)
2307 s->mb_info_size += 12;
2311 static int encode_thread(AVCodecContext *c, void *arg){
2312 MpegEncContext *s= *(void**)arg;
2313 int mb_x, mb_y, pdif = 0;
2314 int chr_h= 16>>s->chroma_y_shift;
2316 MpegEncContext best_s, backup_s;
2317 uint8_t bit_buf[2][MAX_MB_BYTES];
2318 uint8_t bit_buf2[2][MAX_MB_BYTES];
2319 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2320 PutBitContext pb[2], pb2[2], tex_pb[2];
2322 ff_check_alignment();
2325 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2326 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2327 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2330 s->last_bits= put_bits_count(&s->pb);
2341 /* init last dc values */
2342 /* note: quant matrix value (8) is implied here */
2343 s->last_dc[i] = 128 << s->intra_dc_precision;
2345 s->current_picture.f.error[i] = 0;
2348 memset(s->last_mv, 0, sizeof(s->last_mv));
2352 switch(s->codec_id){
2353 case AV_CODEC_ID_H263:
2354 case AV_CODEC_ID_H263P:
2355 case AV_CODEC_ID_FLV1:
2356 if (CONFIG_H263_ENCODER)
2357 s->gob_index = ff_h263_get_gob_height(s);
2359 case AV_CODEC_ID_MPEG4:
2360 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2361 ff_mpeg4_init_partitions(s);
2367 s->first_slice_line = 1;
2368 s->ptr_lastgob = s->pb.buf;
2369 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2373 ff_set_qscale(s, s->qscale);
2374 ff_init_block_index(s);
2376 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2377 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2378 int mb_type= s->mb_type[xy];
2383 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2384 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2387 if(s->data_partitioning){
2388 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2389 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2390 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2396 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2397 ff_update_block_index(s);
2399 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2400 ff_h261_reorder_mb_index(s);
2401 xy= s->mb_y*s->mb_stride + s->mb_x;
2402 mb_type= s->mb_type[xy];
2405 /* write gob / video packet header */
2407 int current_packet_size, is_gob_start;
2409 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2411 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2413 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2415 switch(s->codec_id){
2416 case AV_CODEC_ID_H263:
2417 case AV_CODEC_ID_H263P:
2418 if(!s->h263_slice_structured)
2419 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2421 case AV_CODEC_ID_MPEG2VIDEO:
2422 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2423 case AV_CODEC_ID_MPEG1VIDEO:
2424 if(s->mb_skip_run) is_gob_start=0;
2429 if(s->start_mb_y != mb_y || mb_x!=0){
2432 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2433 ff_mpeg4_init_partitions(s);
2437 assert((put_bits_count(&s->pb)&7) == 0);
2438 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2440 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2441 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2442 int d= 100 / s->avctx->error_rate;
2444 current_packet_size=0;
2445 s->pb.buf_ptr= s->ptr_lastgob;
2446 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2450 if (s->avctx->rtp_callback){
2451 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2452 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2454 update_mb_info(s, 1);
2456 switch(s->codec_id){
2457 case AV_CODEC_ID_MPEG4:
2458 if (CONFIG_MPEG4_ENCODER) {
2459 ff_mpeg4_encode_video_packet_header(s);
2460 ff_mpeg4_clean_buffers(s);
2463 case AV_CODEC_ID_MPEG1VIDEO:
2464 case AV_CODEC_ID_MPEG2VIDEO:
2465 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2466 ff_mpeg1_encode_slice_header(s);
2467 ff_mpeg1_clean_buffers(s);
2470 case AV_CODEC_ID_H263:
2471 case AV_CODEC_ID_H263P:
2472 if (CONFIG_H263_ENCODER)
2473 ff_h263_encode_gob_header(s, mb_y);
2477 if(s->flags&CODEC_FLAG_PASS1){
2478 int bits= put_bits_count(&s->pb);
2479 s->misc_bits+= bits - s->last_bits;
2483 s->ptr_lastgob += current_packet_size;
2484 s->first_slice_line=1;
2485 s->resync_mb_x=mb_x;
2486 s->resync_mb_y=mb_y;
2490 if( (s->resync_mb_x == s->mb_x)
2491 && s->resync_mb_y+1 == s->mb_y){
2492 s->first_slice_line=0;
2496 s->dquant=0; //only for QP_RD
2498 update_mb_info(s, 0);
2500 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2502 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2504 copy_context_before_encode(&backup_s, s, -1);
2506 best_s.data_partitioning= s->data_partitioning;
2507 best_s.partitioned_frame= s->partitioned_frame;
2508 if(s->data_partitioning){
2509 backup_s.pb2= s->pb2;
2510 backup_s.tex_pb= s->tex_pb;
2513 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2514 s->mv_dir = MV_DIR_FORWARD;
2515 s->mv_type = MV_TYPE_16X16;
2517 s->mv[0][0][0] = s->p_mv_table[xy][0];
2518 s->mv[0][0][1] = s->p_mv_table[xy][1];
2519 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2520 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2522 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2523 s->mv_dir = MV_DIR_FORWARD;
2524 s->mv_type = MV_TYPE_FIELD;
2527 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2528 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2529 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2531 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2532 &dmin, &next_block, 0, 0);
2534 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2535 s->mv_dir = MV_DIR_FORWARD;
2536 s->mv_type = MV_TYPE_16X16;
2540 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2541 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2543 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2544 s->mv_dir = MV_DIR_FORWARD;
2545 s->mv_type = MV_TYPE_8X8;
2548 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2549 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2551 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2552 &dmin, &next_block, 0, 0);
2554 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2555 s->mv_dir = MV_DIR_FORWARD;
2556 s->mv_type = MV_TYPE_16X16;
2558 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2559 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2560 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2561 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2563 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2564 s->mv_dir = MV_DIR_BACKWARD;
2565 s->mv_type = MV_TYPE_16X16;
2567 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2568 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2569 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2570 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2572 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2573 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2574 s->mv_type = MV_TYPE_16X16;
2576 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2577 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2578 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2579 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2580 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2581 &dmin, &next_block, 0, 0);
2583 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2584 s->mv_dir = MV_DIR_FORWARD;
2585 s->mv_type = MV_TYPE_FIELD;
2588 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2589 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2590 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2592 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2593 &dmin, &next_block, 0, 0);
2595 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2596 s->mv_dir = MV_DIR_BACKWARD;
2597 s->mv_type = MV_TYPE_FIELD;
2600 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2601 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2602 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2604 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2605 &dmin, &next_block, 0, 0);
2607 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2608 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2609 s->mv_type = MV_TYPE_FIELD;
2611 for(dir=0; dir<2; dir++){
2613 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2614 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2615 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2618 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2619 &dmin, &next_block, 0, 0);
2621 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2623 s->mv_type = MV_TYPE_16X16;
2627 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2628 &dmin, &next_block, 0, 0);
2629 if(s->h263_pred || s->h263_aic){
2631 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2633 ff_clean_intra_table_entries(s); //old mode?
2637 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2638 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2639 const int last_qp= backup_s.qscale;
2642 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2643 static const int dquant_tab[4]={-1,1,-2,2};
2645 assert(backup_s.dquant == 0);
2648 s->mv_dir= best_s.mv_dir;
2649 s->mv_type = MV_TYPE_16X16;
2650 s->mb_intra= best_s.mb_intra;
2651 s->mv[0][0][0] = best_s.mv[0][0][0];
2652 s->mv[0][0][1] = best_s.mv[0][0][1];
2653 s->mv[1][0][0] = best_s.mv[1][0][0];
2654 s->mv[1][0][1] = best_s.mv[1][0][1];
2656 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2657 for(; qpi<4; qpi++){
2658 int dquant= dquant_tab[qpi];
2659 qp= last_qp + dquant;
2660 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2662 backup_s.dquant= dquant;
2663 if(s->mb_intra && s->dc_val[0]){
2665 dc[i]= s->dc_val[0][ s->block_index[i] ];
2666 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2670 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2671 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2672 if(best_s.qscale != qp){
2673 if(s->mb_intra && s->dc_val[0]){
2675 s->dc_val[0][ s->block_index[i] ]= dc[i];
2676 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2683 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2684 int mx= s->b_direct_mv_table[xy][0];
2685 int my= s->b_direct_mv_table[xy][1];
2687 backup_s.dquant = 0;
2688 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2690 ff_mpeg4_set_direct_mv(s, mx, my);
2691 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2692 &dmin, &next_block, mx, my);
2694 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2695 backup_s.dquant = 0;
2696 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2698 ff_mpeg4_set_direct_mv(s, 0, 0);
2699 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2700 &dmin, &next_block, 0, 0);
2702 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2705 coded |= s->block_last_index[i];
2708 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2709 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2710 mx=my=0; //FIXME find the one we actually used
2711 ff_mpeg4_set_direct_mv(s, mx, my);
2712 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2720 s->mv_dir= best_s.mv_dir;
2721 s->mv_type = best_s.mv_type;
2723 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2724 s->mv[0][0][1] = best_s.mv[0][0][1];
2725 s->mv[1][0][0] = best_s.mv[1][0][0];
2726 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2729 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2730 &dmin, &next_block, mx, my);
2735 s->current_picture.qscale_table[xy] = best_s.qscale;
2737 copy_context_after_encode(s, &best_s, -1);
2739 pb_bits_count= put_bits_count(&s->pb);
2740 flush_put_bits(&s->pb);
2741 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2744 if(s->data_partitioning){
2745 pb2_bits_count= put_bits_count(&s->pb2);
2746 flush_put_bits(&s->pb2);
2747 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2748 s->pb2= backup_s.pb2;
2750 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2751 flush_put_bits(&s->tex_pb);
2752 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2753 s->tex_pb= backup_s.tex_pb;
2755 s->last_bits= put_bits_count(&s->pb);
2757 if (CONFIG_H263_ENCODER &&
2758 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2759 ff_h263_update_motion_val(s);
2761 if(next_block==0){ //FIXME 16 vs linesize16
2762 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2763 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2764 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2767 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2768 ff_MPV_decode_mb(s, s->block);
2770 int motion_x = 0, motion_y = 0;
2771 s->mv_type=MV_TYPE_16X16;
2772 // only one MB-Type possible
2775 case CANDIDATE_MB_TYPE_INTRA:
2778 motion_x= s->mv[0][0][0] = 0;
2779 motion_y= s->mv[0][0][1] = 0;
2781 case CANDIDATE_MB_TYPE_INTER:
2782 s->mv_dir = MV_DIR_FORWARD;
2784 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2785 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2787 case CANDIDATE_MB_TYPE_INTER_I:
2788 s->mv_dir = MV_DIR_FORWARD;
2789 s->mv_type = MV_TYPE_FIELD;
2792 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2793 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2794 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2797 case CANDIDATE_MB_TYPE_INTER4V:
2798 s->mv_dir = MV_DIR_FORWARD;
2799 s->mv_type = MV_TYPE_8X8;
2802 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2803 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2806 case CANDIDATE_MB_TYPE_DIRECT:
2807 if (CONFIG_MPEG4_ENCODER) {
2808 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2810 motion_x=s->b_direct_mv_table[xy][0];
2811 motion_y=s->b_direct_mv_table[xy][1];
2812 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2815 case CANDIDATE_MB_TYPE_DIRECT0:
2816 if (CONFIG_MPEG4_ENCODER) {
2817 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2819 ff_mpeg4_set_direct_mv(s, 0, 0);
2822 case CANDIDATE_MB_TYPE_BIDIR:
2823 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2825 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2826 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2827 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2828 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2830 case CANDIDATE_MB_TYPE_BACKWARD:
2831 s->mv_dir = MV_DIR_BACKWARD;
2833 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2834 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2836 case CANDIDATE_MB_TYPE_FORWARD:
2837 s->mv_dir = MV_DIR_FORWARD;
2839 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2840 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2842 case CANDIDATE_MB_TYPE_FORWARD_I:
2843 s->mv_dir = MV_DIR_FORWARD;
2844 s->mv_type = MV_TYPE_FIELD;
2847 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2848 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2849 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2852 case CANDIDATE_MB_TYPE_BACKWARD_I:
2853 s->mv_dir = MV_DIR_BACKWARD;
2854 s->mv_type = MV_TYPE_FIELD;
2857 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2858 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2859 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2862 case CANDIDATE_MB_TYPE_BIDIR_I:
2863 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2864 s->mv_type = MV_TYPE_FIELD;
2866 for(dir=0; dir<2; dir++){
2868 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2869 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2870 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2875 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2878 encode_mb(s, motion_x, motion_y);
2880 // RAL: Update last macroblock type
2881 s->last_mv_dir = s->mv_dir;
2883 if (CONFIG_H263_ENCODER &&
2884 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2885 ff_h263_update_motion_val(s);
2887 ff_MPV_decode_mb(s, s->block);
2890 /* clean the MV table in IPS frames for direct mode in B frames */
2891 if(s->mb_intra /* && I,P,S_TYPE */){
2892 s->p_mv_table[xy][0]=0;
2893 s->p_mv_table[xy][1]=0;
2896 if(s->flags&CODEC_FLAG_PSNR){
2900 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2901 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2903 s->current_picture.f.error[0] += sse(
2904 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2905 s->dest[0], w, h, s->linesize);
2906 s->current_picture.f.error[1] += sse(
2907 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2908 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2909 s->current_picture.f.error[2] += sse(
2910 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2911 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2914 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2915 ff_h263_loop_filter(s);
2917 av_dlog(s->avctx, "MB %d %d bits\n",
2918 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2922 //not beautiful here but we must write it before flushing so it has to be here
2923 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2924 ff_msmpeg4_encode_ext_header(s);
2928 /* Send the last GOB if RTP */
2929 if (s->avctx->rtp_callback) {
2930 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2931 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2932 /* Call the RTP callback to send the last GOB */
2934 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2940 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice-thread motion-estimation statistics back into the main
 * context after ME: the scene-change score and the (MC) macroblock variance
 * accumulators.  MERGE() adds src's counter into dst and zeroes src. */
2941 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2942 MERGE(me.scene_change_score);
2943 MERGE(me.mc_mb_var_sum_temp);
2944 MERGE(me.mb_var_sum_temp);
/* Merge the encoding results of one slice thread context into the main
 * context: bit-accounting counters, error/PSNR sums, optional noise-reduction
 * DCT error statistics, and finally the slice's bitstream itself, which is
 * appended to the destination bit writer (both must be byte aligned). */
2947 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2950 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2951 MERGE(dct_count[1]);
2960 MERGE(er.error_count);
2961 MERGE(padding_bug_score);
2962 MERGE(current_picture.f.error[0]);
2963 MERGE(current_picture.f.error[1]);
2964 MERGE(current_picture.f.error[2]);
2966 if(dst->avctx->noise_reduction){
2967 for(i=0; i<64; i++){
2968 MERGE(dct_error_sum[0][i]);
2969 MERGE(dct_error_sum[1][i]);
/* both slice bitstreams end on a byte boundary, so the slice data can be
 * concatenated with a plain bit copy */
2973 assert(put_bits_count(&src->pb) % 8 ==0);
2974 assert(put_bits_count(&dst->pb) % 8 ==0);
2975 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2976 flush_put_bits(&dst->pb);
/* Pick the quantizer/lambda for the current picture.
 * Priority: an explicitly queued s->next_lambda wins, otherwise the rate
 * controller (ff_rate_estimate_qscale) is consulted unless qscale is fixed.
 * With dry_run set, state such as next_lambda is left untouched so the call
 * can be used for the 2-pass pre-estimate.  For adaptive quantization the
 * per-MB qscale table is cleaned up per codec and then initialized. */
2979 static int estimate_qp(MpegEncContext *s, int dry_run){
2980 if (s->next_lambda){
2981 s->current_picture_ptr->f.quality =
2982 s->current_picture.f.quality = s->next_lambda;
2983 if(!dry_run) s->next_lambda= 0;
2984 } else if (!s->fixed_qscale) {
2985 s->current_picture_ptr->f.quality =
2986 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2987 if (s->current_picture.f.quality < 0)
2991 if(s->adaptive_quant){
2992 switch(s->codec_id){
2993 case AV_CODEC_ID_MPEG4:
2994 if (CONFIG_MPEG4_ENCODER)
2995 ff_clean_mpeg4_qscales(s);
2997 case AV_CODEC_ID_H263:
2998 case AV_CODEC_ID_H263P:
2999 case AV_CODEC_ID_FLV1:
3000 if (CONFIG_H263_ENCODER)
3001 ff_clean_h263_qscales(s);
3004 ff_init_qscale_tab(s);
/* adaptive quant: lambda varies per MB, start from the table */
3007 s->lambda= s->lambda_table[0];
3010 s->lambda = s->current_picture.f.quality;
3015 /* must be called before writing the header */
/* Update the temporal bookkeeping used for B-frame prediction:
 * s->time is the current picture's time in time_base.num units;
 * for B-frames pb_time is the distance to the previous non-B picture,
 * for non-B pictures pp_time is the distance between the last two
 * non-B pictures (both must be strictly positive). */
3016 static void set_frame_distances(MpegEncContext * s){
3017 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3018 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3020 if(s->pict_type==AV_PICTURE_TYPE_B){
3021 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3022 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3024 s->pp_time= s->time - s->last_non_b_time;
3025 s->last_non_b_time= s->time;
3026 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture.
 * Pipeline: reset ME statistics -> frame-distance/time setup -> rounding mode
 * selection -> lambda/qscale estimation -> per-slice-thread context setup ->
 * motion estimation (or all-INTRA marking for I frames) -> merge of ME stats
 * -> scene-change promotion to I -> f_code/b_code selection and long-MV
 * clipping -> final qscale -> picture header -> threaded slice encoding ->
 * merge of the slice results.  Returns 0 on success, negative on error
 * (error paths are partly outside this sampled view). */
3030 static int encode_picture(MpegEncContext *s, int picture_number)
3034 int context_count = s->slice_context_count;
3036 s->picture_number = picture_number;
3038 /* Reset the average MB variance */
3039 s->me.mb_var_sum_temp =
3040 s->me.mc_mb_var_sum_temp = 0;
3042 /* we need to initialize some time vars before we can encode b-frames */
3043 // RAL: Condition added for MPEG1VIDEO
3044 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3045 set_frame_distances(s);
3046 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3047 ff_set_mpeg4_time(s);
3049 s->me.scene_change_score=0;
3051 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding control: I frames reset it (MSMPEG4 v3+ forces no_rounding);
 * P/S frames toggle it each frame for codecs with flip-flop rounding */
3053 if(s->pict_type==AV_PICTURE_TYPE_I){
3054 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3055 else s->no_rounding=0;
3056 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3057 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3058 s->no_rounding ^= 1;
/* lambda for motion estimation: pass-2 uses the stored stats, otherwise
 * reuse the last lambda of the matching picture type */
3061 if(s->flags & CODEC_FLAG_PASS2){
3062 if (estimate_qp(s,1) < 0)
3064 ff_get_2pass_fcode(s);
3065 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3066 if(s->pict_type==AV_PICTURE_TYPE_B)
3067 s->lambda= s->last_lambda_for[s->pict_type];
3069 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3073 s->mb_intra=0; //for the rate distortion & bit compare functions
3074 for(i=1; i<context_count; i++){
3075 ret = ff_update_duplicate_context(s->thread_context[i], s);
3083 /* Estimate motion for every MB */
3084 if(s->pict_type != AV_PICTURE_TYPE_I){
3085 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3086 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3087 if (s->pict_type != AV_PICTURE_TYPE_B) {
3088 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3089 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3093 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3094 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I-frame: mark every MB intra; no motion search needed */
3096 for(i=0; i<s->mb_stride*s->mb_height; i++)
3097 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3099 if(!s->fixed_qscale){
3100 /* finding spatial complexity for I-frame rate control */
3101 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3104 for(i=1; i<context_count; i++){
3105 merge_context_after_me(s, s->thread_context[i]);
3107 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3108 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* scene-change detection: promote this P frame to an all-intra I frame */
3111 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3112 s->pict_type= AV_PICTURE_TYPE_I;
3113 for(i=0; i<s->mb_stride*s->mb_height; i++)
3114 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3115 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3116 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* pick f_code from the MV range, then clip/convert MVs that exceed it */
3120 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3121 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3123 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3125 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3126 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3127 s->f_code= FFMAX3(s->f_code, a, b);
3130 ff_fix_long_p_mvs(s);
3131 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3132 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3136 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3137 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames need both a forward (f_code) and backward (b_code) range */
3142 if(s->pict_type==AV_PICTURE_TYPE_B){
3145 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3146 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3147 s->f_code = FFMAX(a, b);
3149 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3150 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3151 s->b_code = FFMAX(a, b);
3153 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3154 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3155 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3156 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3157 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3159 for(dir=0; dir<2; dir++){
3162 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3163 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3164 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3165 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3173 if (estimate_qp(s, 0) < 0)
3176 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3177 s->qscale= 3; //reduce clipping problems
3179 if (s->out_format == FMT_MJPEG) {
3180 /* for mjpeg, we do include qscale in the matrix */
3182 int j= s->dsp.idct_permutation[i];
3184 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3186 s->y_dc_scale_table=
3187 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3188 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3189 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3190 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3194 //FIXME var duplication
3195 s->current_picture_ptr->f.key_frame =
3196 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3197 s->current_picture_ptr->f.pict_type =
3198 s->current_picture.f.pict_type = s->pict_type;
3200 if (s->current_picture.f.key_frame)
3201 s->picture_in_gop_number=0;
/* write the codec-specific picture header */
3203 s->last_bits= put_bits_count(&s->pb);
3204 switch(s->out_format) {
3206 if (CONFIG_MJPEG_ENCODER)
3207 ff_mjpeg_encode_picture_header(s);
3210 if (CONFIG_H261_ENCODER)
3211 ff_h261_encode_picture_header(s, picture_number);
3214 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3215 ff_wmv2_encode_picture_header(s, picture_number);
3216 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3217 ff_msmpeg4_encode_picture_header(s, picture_number);
3218 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3219 ff_mpeg4_encode_picture_header(s, picture_number);
3220 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3221 ff_rv10_encode_picture_header(s, picture_number);
3222 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3223 ff_rv20_encode_picture_header(s, picture_number);
3224 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3225 ff_flv_encode_picture_header(s, picture_number);
3226 else if (CONFIG_H263_ENCODER)
3227 ff_h263_encode_picture_header(s, picture_number);
3230 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3231 ff_mpeg1_encode_picture_header(s, picture_number);
3236 bits= put_bits_count(&s->pb);
3237 s->header_bits= bits - s->last_bits;
/* encode slices in parallel, then merge bitstreams and statistics */
3239 for(i=1; i<context_count; i++){
3240 update_duplicate_context_after_me(s->thread_context[i], s);
3242 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3243 for(i=1; i<context_count; i++){
3244 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter applied to DCT coefficients before quantization.
 * Accumulates the absolute coefficient magnitudes into dct_error_sum (used
 * to derive dct_offset elsewhere) and shrinks each coefficient towards zero
 * by the per-position offset, clamping at zero so the sign never flips.
 * Separate statistics are kept for intra vs inter blocks. */
3250 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3251 const int intra= s->mb_intra;
3254 s->dct_count[intra]++;
3256 for(i=0; i<64; i++){
3257 int level= block[i];
/* positive branch: record magnitude, subtract offset, clamp at 0 */
3261 s->dct_error_sum[intra][i] += level;
3262 level -= s->dct_offset[intra][i];
3263 if(level<0) level=0;
/* negative branch: mirror of the above (note level is negative here) */
3265 s->dct_error_sum[intra][i] -= level;
3266 level += s->dct_offset[intra][i];
3267 if(level>0) level=0;
/* Rate-distortion-optimal (trellis) quantization of one 8x8 block.
 * Forward-DCTs the block, derives up to two candidate quantized levels per
 * coefficient, then runs a Viterbi-style search over (run, level) paths,
 * scoring each candidate as distortion + lambda * VLC bits.  On return,
 * block[] holds the chosen quantized levels in coding order and the index
 * of the last nonzero coefficient is returned (-1 if the block is empty).
 * *overflow is set if any level exceeded the codec's max coefficient. */
3274 static int dct_quantize_trellis_c(MpegEncContext *s,
3275 int16_t *block, int n,
3276 int qscale, int *overflow){
3278 const uint8_t *scantable= s->intra_scantable.scantable;
3279 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3281 unsigned int threshold1, threshold2;
3293 int coeff_count[64];
3294 int qmul, qadd, start_i, last_non_zero, i, dc;
3295 const int esc_length= s->ac_esc_length;
3297 uint8_t * last_length;
3298 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3300 s->dsp.fdct (block);
3302 if(s->dct_error_sum)
3303 s->denoise_dct(s, block);
3305 qadd= ((qscale-1)|1)*8;
3316 /* For AIC we skip quant/dequant of INTRADC */
3321 /* note: block[0] is assumed to be positive */
3322 block[0] = (block[0] + (q >> 1)) / q;
/* pick quant matrix and VLC length tables per intra/inter */
3325 qmat = s->q_intra_matrix[qscale];
3326 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3327 bias= 1<<(QMAT_SHIFT-1);
3328 length = s->intra_ac_vlc_length;
3329 last_length= s->intra_ac_vlc_last_length;
3333 qmat = s->q_inter_matrix[qscale];
3334 length = s->inter_ac_vlc_length;
3335 last_length= s->inter_ac_vlc_last_length;
3339 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3340 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that survives quantization */
3342 for(i=63; i>=start_i; i--) {
3343 const int j = scantable[i];
3344 int level = block[j] * qmat[j];
3346 if(((unsigned)(level+threshold1))>threshold2){
/* quantize: store up to two candidate levels per coefficient
 * (rounded level and level-1 towards zero) */
3352 for(i=start_i; i<=last_non_zero; i++) {
3353 const int j = scantable[i];
3354 int level = block[j] * qmat[j];
3356 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3357 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3358 if(((unsigned)(level+threshold1))>threshold2){
3360 level= (bias + level)>>QMAT_SHIFT;
3362 coeff[1][i]= level-1;
3363 // coeff[2][k]= level-2;
3365 level= (bias - level)>>QMAT_SHIFT;
3366 coeff[0][i]= -level;
3367 coeff[1][i]= -level+1;
3368 // coeff[2][k]= -level+2;
3370 coeff_count[i]= FFMIN(level, 2);
3371 assert(coeff_count[i]);
/* below threshold: only candidate is +/-1 with the coefficient's sign */
3374 coeff[0][i]= (level>>31)|1;
3379 *overflow= s->max_qcoeff < max; //overflow might have happened
3381 if(last_non_zero < start_i){
3382 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3383 return last_non_zero;
/* trellis search: survivor[] holds positions whose partial score may still
 * lead to the optimum; score_tab[i] is the best cost up to position i */
3386 score_tab[start_i]= 0;
3387 survivor[0]= start_i;
3390 for(i=start_i; i<=last_non_zero; i++){
3391 int level_index, j, zero_distortion;
3392 int dct_coeff= FFABS(block[ scantable[i] ]);
3393 int best_score=256*256*256*120;
/* ifast fDCT has built-in AAN scaling; undo it to compare in DCT domain */
3395 if (s->dsp.fdct == ff_fdct_ifast)
3396 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3397 zero_distortion= dct_coeff*dct_coeff;
3399 for(level_index=0; level_index < coeff_count[i]; level_index++){
3401 int level= coeff[level_index][i];
3402 const int alevel= FFABS(level);
/* reconstruct the dequantized value the decoder would see */
3407 if(s->out_format == FMT_H263){
3408 unquant_coeff= alevel*qmul + qadd;
3410 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3412 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3413 unquant_coeff = (unquant_coeff - 1) | 1;
3415 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3416 unquant_coeff = (unquant_coeff - 1) | 1;
3421 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* levels representable without escape: use the VLC length tables */
3423 if((level&(~127)) == 0){
3424 for(j=survivor_count-1; j>=0; j--){
3425 int run= i - survivor[j];
3426 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3427 score += score_tab[i-run];
3429 if(score < best_score){
3432 level_tab[i+1]= level-64;
/* H.263-style codecs use a distinct VLC for the LAST coefficient */
3436 if(s->out_format == FMT_H263){
3437 for(j=survivor_count-1; j>=0; j--){
3438 int run= i - survivor[j];
3439 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3440 score += score_tab[i-run];
3441 if(score < last_score){
3444 last_level= level-64;
/* escaped levels: fixed escape-code length instead of a table lookup */
3450 distortion += esc_length*lambda;
3451 for(j=survivor_count-1; j>=0; j--){
3452 int run= i - survivor[j];
3453 int score= distortion + score_tab[i-run];
3455 if(score < best_score){
3458 level_tab[i+1]= level-64;
3462 if(s->out_format == FMT_H263){
3463 for(j=survivor_count-1; j>=0; j--){
3464 int run= i - survivor[j];
3465 int score= distortion + score_tab[i-run];
3466 if(score < last_score){
3469 last_level= level-64;
3477 score_tab[i+1]= best_score;
3479 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
/* prune survivors whose partial score can no longer win; the <=27 split
 * relates to the note above (slack of one lambda otherwise) */
3480 if(last_non_zero <= 27){
3481 for(; survivor_count; survivor_count--){
3482 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3486 for(; survivor_count; survivor_count--){
3487 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3492 survivor[ survivor_count++ ]= i+1;
/* non-H.263 formats: choose the best truncation point explicitly */
3495 if(s->out_format != FMT_H263){
3496 last_score= 256*256*256*120;
3497 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3498 int score= score_tab[i];
3499 if(i) score += lambda*2; //FIXME exacter?
3501 if(score < last_score){
3504 last_level= level_tab[i];
3505 last_run= run_tab[i];
3510 s->coded_score[n] = last_score;
3512 dc= FFABS(block[0]);
3513 last_non_zero= last_i - 1;
3514 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3516 if(last_non_zero < start_i)
3517 return last_non_zero;
/* special case: only the first coefficient survived — decide whether
 * coding it at all beats leaving the block empty */
3519 if(last_non_zero == 0 && start_i == 0){
3521 int best_score= dc * dc;
3523 for(i=0; i<coeff_count[0]; i++){
3524 int level= coeff[i][0];
3525 int alevel= FFABS(level);
3526 int unquant_coeff, score, distortion;
3528 if(s->out_format == FMT_H263){
3529 unquant_coeff= (alevel*qmul + qadd)>>3;
3531 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3532 unquant_coeff = (unquant_coeff - 1) | 1;
3534 unquant_coeff = (unquant_coeff + 4) >> 3;
3535 unquant_coeff<<= 3 + 3;
3537 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3539 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3540 else score= distortion + esc_length*lambda;
3542 if(score < best_score){
3544 best_level= level - 64;
3547 block[0]= best_level;
3548 s->coded_score[n] = best_score - dc*dc;
3549 if(best_level == 0) return -1;
3550 else return last_non_zero;
/* backtrack the winning path, writing levels into the permuted block */
3556 block[ perm_scantable[last_non_zero] ]= last_level;
3559 for(; i>start_i; i -= run_tab[i] + 1){
3560 block[ perm_scantable[i-1] ]= level_tab[i];
3563 return last_non_zero;
3566 //#define REFINE_STATS 1
/* 8x8 IDCT basis functions, scaled by BASIS_SHIFT and stored in the
 * IDCT permutation order; filled lazily by build_basis() and used by
 * dct_quantize_refine() to evaluate single-coefficient changes. */
3567 static int16_t basis[64][64];
/* Precompute the 64 2-D DCT basis images.  perm maps natural coefficient
 * order to the IDCT's permuted order; the sqrt(0.5) factors are the
 * standard DCT normalization for the zero-frequency rows/columns. */
3569 static void build_basis(uint8_t *perm){
3576 double s= 0.25*(1<<BASIS_SHIFT);
3578 int perm_index= perm[index];
3579 if(i==0) s*= sqrt(0.5);
3580 if(j==0) s*= sqrt(0.5);
3581 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Rate/distortion refinement of an already-quantized 8x8 block.
 * Repeatedly tries +/-1 changes on individual coefficients (DC included for
 * intra blocks) and keeps any change whose combined VLC-bits + weighted
 * reconstruction-error score improves on the current best, until no single
 * change helps. The residual error is tracked incrementally in the spatial
 * domain via dsp.add_8x8basis()/try_8x8basis().
 *   block:  quantized coefficients, modified in place
 *   weight: per-coefficient error weights
 *   orig:   reference samples the reconstruction is compared against
 * Returns the (possibly new) index of the last non-zero coefficient.
 * NOTE(review): this chunk is a sampled extraction — many lines (variable
 * declarations, loop headers, braces, else-branches) are missing from the
 * visible text; comments below describe only what the visible lines show. */
3588 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3589 int16_t *block, int16_t *weight, int16_t *orig,
3592 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3593 const uint8_t *scantable= s->intra_scantable.scantable;
3594 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3595 // unsigned int threshold1, threshold2;
3600 int qmul, qadd, start_i, last_non_zero, i, dc;
3602 uint8_t * last_length;
3604 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (static => accumulated across calls; debug only) */
3607 static int after_last=0;
3608 static int to_zero=0;
3609 static int from_zero=0;
3612 static int messed_sign=0;
/* lazily build the DCT basis table on first use */
3615 if(basis[0][0] == 0)
3616 build_basis(s->dsp.idct_permutation);
3627 /* For AIC we skip quant/dequant of INTRADC */
3631 q <<= RECON_SHIFT-3;
3632 /* note: block[0] is assumed to be positive */
3634 // block[0] = (block[0] + (q >> 1)) / q;
3636 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3637 // bias= 1<<(QMAT_SHIFT-1);
/* intra blocks use the intra AC VLC length tables for the bit-cost model */
3638 length = s->intra_ac_vlc_length;
3639 last_length= s->intra_ac_vlc_last_length;
/* (inter path) */
3643 length = s->inter_ac_vlc_length;
3644 last_length= s->inter_ac_vlc_last_length;
3646 last_non_zero = s->block_last_index[n];
/* initialise rem[] with the (rounded) DC-only reconstruction error */
3651 dc += (1<<(RECON_SHIFT-1));
3652 for(i=0; i<64; i++){
3653 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3656 STOP_TIMER("memset rem[]")}
/* derive per-coefficient weights for noise shaping from weight[] and qns */
3659 for(i=0; i<64; i++){
3664 w= FFABS(weight[i]) + qns*one;
3665 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3668 // w=weight[i] = (63*qns + (w/2)) / w;
/* lambda scales VLC bit cost against the weighted distortion score */
3674 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* build the run-length table and subtract each dequantized coefficient's
 * contribution from rem[], so rem[] holds the current residual error */
3680 for(i=start_i; i<=last_non_zero; i++){
3681 int j= perm_scantable[i];
3682 const int level= block[j];
3686 if(level<0) coeff= qmul*level - qadd;
3687 else coeff= qmul*level + qadd;
3688 run_tab[rle_index++]=run;
3691 s->dsp.add_8x8basis(rem, basis[j], coeff);
3697 if(last_non_zero>0){
3698 STOP_TIMER("init rem[]")
/* --- iterative improvement loop: current best = "change nothing" --- */
3705 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3708 int run2, best_unquant_change=0, analyze_gradient;
3712 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* precompute a weighted gradient d1[] used to reject sign-mismatched
 * zero->one coefficient changes cheaply */
3714 if(analyze_gradient){
3718 for(i=0; i<64; i++){
3721 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3724 STOP_TIMER("rem*w*w")}
/* --- try +/-1 on the intra DC coefficient --- */
3734 const int level= block[0];
3735 int change, old_coeff;
3737 assert(s->mb_intra);
3741 for(change=-1; change<=1; change+=2){
3742 int new_level= level + change;
3743 int score, new_coeff;
3745 new_coeff= q*new_level;
/* reject DC values outside the representable range */
3746 if(new_coeff >= 2048 || new_coeff < 0)
3749 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3750 if(score<best_score){
3753 best_change= change;
3754 best_unquant_change= new_coeff - old_coeff;
3761 run2= run_tab[rle_index++];
/* --- try +/-1 on every AC coefficient position --- */
3765 for(i=start_i; i<64; i++){
3766 int j= perm_scantable[i];
3767 const int level= block[j];
3768 int change, old_coeff;
/* beyond last_non_zero+1 only considered at the highest noise-shaping level */
3770 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3774 if(level<0) old_coeff= qmul*level - qadd;
3775 else old_coeff= qmul*level + qadd;
3776 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3780 assert(run2>=0 || i >= last_non_zero );
3783 for(change=-1; change<=1; change+=2){
3784 int new_level= level + change;
3785 int score, new_coeff, unquant_change;
/* at low noise-shaping levels, never grow a coefficient's magnitude */
3788 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3792 if(new_level<0) new_coeff= qmul*new_level - qadd;
3793 else new_coeff= qmul*new_level + qadd;
3794 if(new_coeff >= 2048 || new_coeff <= -2048)
3796 //FIXME check for overflow
/* case 1: level stays non-zero -> only this coefficient's VLC cost changes */
3798 if(level < 63 && level > -63){
3799 if(level < 63 && level > -63){
3961 int ff_dct_quantize_c(MpegEncContext *s,
3962 int16_t *block, int n,
3963 int qscale, int *overflow)
3965 int i, j, level, last_non_zero, q, start_i;
3967 const uint8_t *scantable= s->intra_scantable.scantable;
3970 unsigned int threshold1, threshold2;
3972 s->dsp.fdct (block);
3974 if(s->dct_error_sum)
3975 s->denoise_dct(s, block);
3985 /* For AIC we skip quant/dequant of INTRADC */
3988 /* note: block[0] is assumed to be positive */
3989 block[0] = (block[0] + (q >> 1)) / q;
3992 qmat = s->q_intra_matrix[qscale];
3993 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3997 qmat = s->q_inter_matrix[qscale];
3998 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4000 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4001 threshold2= (threshold1<<1);
4002 for(i=63;i>=start_i;i--) {
4004 level = block[j] * qmat[j];
4006 if(((unsigned)(level+threshold1))>threshold2){
4013 for(i=start_i; i<=last_non_zero; i++) {
4015 level = block[j] * qmat[j];
4017 // if( bias+level >= (1<<QMAT_SHIFT)
4018 // || bias-level >= (1<<QMAT_SHIFT)){
4019 if(((unsigned)(level+threshold1))>threshold2){
4021 level= (bias + level)>>QMAT_SHIFT;
4024 level= (bias - level)>>QMAT_SHIFT;
4032 *overflow= s->max_qcoeff < max; //overflow might have happened
4034 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4035 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4036 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4038 return last_non_zero;
4041 #define OFFSET(x) offsetof(MpegEncContext, x)
4042 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4043 static const AVOption h263_options[] = {
4044 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4045 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4046 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4051 static const AVClass h263_class = {
4052 .class_name = "H.263 encoder",
4053 .item_name = av_default_item_name,
4054 .option = h263_options,
4055 .version = LIBAVUTIL_VERSION_INT,
4058 AVCodec ff_h263_encoder = {
4060 .type = AVMEDIA_TYPE_VIDEO,
4061 .id = AV_CODEC_ID_H263,
4062 .priv_data_size = sizeof(MpegEncContext),
4063 .init = ff_MPV_encode_init,
4064 .encode2 = ff_MPV_encode_picture,
4065 .close = ff_MPV_encode_end,
4066 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4067 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4068 .priv_class = &h263_class,
4071 static const AVOption h263p_options[] = {
4072 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4073 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4074 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4075 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4079 static const AVClass h263p_class = {
4080 .class_name = "H.263p encoder",
4081 .item_name = av_default_item_name,
4082 .option = h263p_options,
4083 .version = LIBAVUTIL_VERSION_INT,
4086 AVCodec ff_h263p_encoder = {
4088 .type = AVMEDIA_TYPE_VIDEO,
4089 .id = AV_CODEC_ID_H263P,
4090 .priv_data_size = sizeof(MpegEncContext),
4091 .init = ff_MPV_encode_init,
4092 .encode2 = ff_MPV_encode_picture,
4093 .close = ff_MPV_encode_end,
4094 .capabilities = CODEC_CAP_SLICE_THREADS,
4095 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4096 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4097 .priv_class = &h263p_class,
4100 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4102 AVCodec ff_msmpeg4v2_encoder = {
4103 .name = "msmpeg4v2",
4104 .type = AVMEDIA_TYPE_VIDEO,
4105 .id = AV_CODEC_ID_MSMPEG4V2,
4106 .priv_data_size = sizeof(MpegEncContext),
4107 .init = ff_MPV_encode_init,
4108 .encode2 = ff_MPV_encode_picture,
4109 .close = ff_MPV_encode_end,
4110 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4111 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4112 .priv_class = &msmpeg4v2_class,
4115 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4117 AVCodec ff_msmpeg4v3_encoder = {
4119 .type = AVMEDIA_TYPE_VIDEO,
4120 .id = AV_CODEC_ID_MSMPEG4V3,
4121 .priv_data_size = sizeof(MpegEncContext),
4122 .init = ff_MPV_encode_init,
4123 .encode2 = ff_MPV_encode_picture,
4124 .close = ff_MPV_encode_end,
4125 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4126 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4127 .priv_class = &msmpeg4v3_class,
4130 FF_MPV_GENERIC_CLASS(wmv1)
4132 AVCodec ff_wmv1_encoder = {
4134 .type = AVMEDIA_TYPE_VIDEO,
4135 .id = AV_CODEC_ID_WMV1,
4136 .priv_data_size = sizeof(MpegEncContext),
4137 .init = ff_MPV_encode_init,
4138 .encode2 = ff_MPV_encode_picture,
4139 .close = ff_MPV_encode_end,
4140 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4141 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4142 .priv_class = &wmv1_class,