2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
39 #include "mpegvideo.h"
47 #include "aandcttab.h"
49 #include "mpeg4video.h"
51 #include "bytestream.h"
/* Forward declarations of encoder-local helpers defined later in this file. */
57 static int encode_picture(MpegEncContext *s, int picture_number);
58 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
59 static int sse_mb(MpegEncContext *s);
60 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
61 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default motion-estimation tables; filled once in MPV_encode_defaults(). */
65 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
66 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOptions shared by all mpegvideo-based encoders
 * (initializer list not visible in this excerpt). */
68 const AVOption ff_mpv_generic_options[] = {
/* Build per-qscale quantization tables from an 8x8 quant matrix.
 * For each qscale in [qmin, qmax] this fills qmat[] (32-bit reciprocal
 * multipliers) and qmat16[] (16-bit multiplier/bias pairs for the SIMD
 * path), choosing a scale factor that depends on which forward-DCT
 * implementation is selected: the ifast fdct folds ff_aanscales[] into
 * the table, the islow/faan fdcts do not.
 * NOTE(review): the entries are stored through dsp->idct_permutation
 * (index j) so they match the coefficient order the (i)DCT uses. */
73 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
74 uint16_t (*qmat16)[2][64],
75 const uint16_t *quant_matrix,
76 int bias, int qmin, int qmax, int intra)
81 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Branch on the installed fdct to pick the matching scaling. */
83 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
84 dsp->fdct == ff_jpeg_fdct_islow_10 ||
85 dsp->fdct == ff_faandct) {
86 for (i = 0; i < 64; i++) {
87 const int j = dsp->idct_permutation[i];
88 /* 16 <= qscale * quant_matrix[i] <= 7905
89 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
90 * 19952 <= x <= 249205026
91 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
92 * 3444240 >= (1 << 36) / (x) >= 275 */
94 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
95 (qscale * quant_matrix[j]));
97 } else if (dsp->fdct == ff_fdct_ifast) {
98 for (i = 0; i < 64; i++) {
99 const int j = dsp->idct_permutation[i];
100 /* 16 <= qscale * quant_matrix[i] <= 7905
101 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
102 * 19952 <= x <= 249205026
103 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
104 * 3444240 >= (1 << 36) / (x) >= 275 */
/* ifast leaves the AAN post-scale in the coefficients, so divide it
 * out here (extra +14 bits of headroom for the 14-bit aanscales). */
106 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
107 (ff_aanscales[i] * qscale *
111 for (i = 0; i < 64; i++) {
112 const int j = dsp->idct_permutation[i];
113 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
114 * Assume x = qscale * quant_matrix[i]
116 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
117 * so 32768 >= (1 << 19) / (x) >= 67 */
118 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
119 (qscale * quant_matrix[j]));
120 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
121 // (qscale * quant_matrix[i]);
122 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
123 (qscale * quant_matrix[j]);
/* Keep the 16-bit multiplier out of the degenerate 0 / 0x8000 values. */
125 if (qmat16[qscale][0][i] == 0 ||
126 qmat16[qscale][0][i] == 128 * 256)
127 qmat16[qscale][0][i] = 128 * 256 - 1;
128 qmat16[qscale][1][i] =
129 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
130 qmat16[qscale][0][i]);
/* Shrink the table shift until max * qmat fits in an int; warn if the
 * headroom still allows overflow. Starts at 'intra' to skip the DC
 * coefficient for intra matrices. */
134 for (i = intra; i < 64; i++) {
136 if (dsp->fdct == ff_fdct_ifast) {
137 max = (8191LL * ff_aanscales[i]) >> 14;
139 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
145 av_log(NULL, AV_LOG_INFO,
146 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive s->qscale from the current rate-distortion lambda, clip it to
 * the user's [qmin, qmax] range, and refresh the squared-lambda cache.
 * The (lambda * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7)
 * form is a fixed-point rounding conversion from lambda to qscale. */
151 static inline void update_qscale(MpegEncContext *s)
153 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
154 (FF_LAMBDA_SHIFT + 7);
155 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
157 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Emit all 64 matrix coefficients to the bitstream, 8 bits each,
 * in zigzag scan order as the MPEG headers require. */
161 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
167 for (i = 0; i < 64; i++) {
168 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
175 * init s->current_picture.qscale_table from s->lambda_table
177 void ff_init_qscale_tab(MpegEncContext *s)
179 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a clipped per-MB qscale, using
 * the same fixed-point lambda->qscale mapping as update_qscale(). */
182 for (i = 0; i < s->mb_num; i++) {
183 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
184 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
185 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the frame-level metadata (type, quality, ordering numbers,
 * interlacing flags) from src to dst; pixel data is not touched. */
190 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst,
193 dst->pict_type = src->pict_type;
194 dst->quality = src->quality;
195 dst->coded_picture_number = src->coded_picture_number;
196 dst->display_picture_number = src->display_picture_number;
197 //dst->reference = src->reference;
199 dst->interlaced_frame = src->interlaced_frame;
200 dst->top_field_first = src->top_field_first;
/* Propagate fields that motion estimation may have changed from the
 * master context back into a slice-thread duplicate context. */
203 static void update_duplicate_context_after_me(MpegEncContext *dst,
206 #define COPY(a) dst->a= src->a
208 COPY(current_picture);
214 COPY(picture_in_gop_number);
215 COPY(gop_picture_number);
216 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
217 COPY(progressive_frame); // FIXME don't set in encode_header
218 COPY(partitioned_frame); // FIXME don't set in encode_header
223 * Set the given MpegEncContext to defaults for encoding.
224 * the changed fields will not depend upon the prior state of the MpegEncContext.
226 static void MPV_encode_defaults(MpegEncContext *s)
229 ff_MPV_common_defaults(s);
/* Fill the shared default fcode table; indices are offset by MAX_MV so
 * the loop's [-16, 16) range maps into valid array slots. */
231 for (i = -16; i < 16; i++) {
232 default_fcode_tab[i + MAX_MV] = 1;
234 s->me.mv_penalty = default_mv_penalty;
235 s->fcode_tab = default_fcode_tab;
238 /* init video encoder */
/* Initialize the mpegvideo encoder: validate user parameters against
 * the chosen codec's capabilities, copy AVCodecContext settings into
 * the MpegEncContext, configure per-codec output format and features,
 * build the quantization matrices, and start the rate controller.
 * Returns a negative value on any invalid configuration. */
239 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
241 MpegEncContext *s = avctx->priv_data;
243 int chroma_h_shift, chroma_v_shift;
245 MPV_encode_defaults(s);
/* --- pixel-format validation, per codec --- */
247 switch (avctx->codec_id) {
248 case AV_CODEC_ID_MPEG2VIDEO:
249 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
250 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
251 av_log(avctx, AV_LOG_ERROR,
252 "only YUV420 and YUV422 are supported\n");
256 case AV_CODEC_ID_LJPEG:
/* Non-J (limited-range) variants are allowed only with
 * strict_std_compliance <= unofficial. */
257 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
258 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
259 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
260 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
261 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
262 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
263 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
264 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
265 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
269 case AV_CODEC_ID_MJPEG:
270 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
271 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
272 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
273 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
274 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
275 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
280 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
281 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Record the chroma format matching the accepted pixel format. */
286 switch (avctx->pix_fmt) {
287 case AV_PIX_FMT_YUVJ422P:
288 case AV_PIX_FMT_YUV422P:
289 s->chroma_format = CHROMA_422;
291 case AV_PIX_FMT_YUVJ420P:
292 case AV_PIX_FMT_YUV420P:
294 s->chroma_format = CHROMA_420;
/* --- copy user settings into the encoder context --- */
298 s->bit_rate = avctx->bit_rate;
299 s->width = avctx->width;
300 s->height = avctx->height;
301 if (avctx->gop_size > 600 &&
302 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
303 av_log(avctx, AV_LOG_ERROR,
304 "Warning keyframe interval too large! reducing it ...\n");
305 avctx->gop_size = 600;
307 s->gop_size = avctx->gop_size;
309 s->flags = avctx->flags;
310 s->flags2 = avctx->flags2;
311 s->max_b_frames = avctx->max_b_frames;
312 s->codec_id = avctx->codec->id;
313 s->strict_std_compliance = avctx->strict_std_compliance;
314 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
315 s->mpeg_quant = avctx->mpeg_quant;
316 s->rtp_mode = !!avctx->rtp_payload_size;
317 s->intra_dc_precision = avctx->intra_dc_precision;
318 s->user_specified_pts = AV_NOPTS_VALUE;
320 if (s->gop_size <= 1) {
327 s->me_method = avctx->me_method;
330 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking option or QP-RD
 * is requested. */
332 s->adaptive_quant = (s->avctx->lumi_masking ||
333 s->avctx->dark_masking ||
334 s->avctx->temporal_cplx_masking ||
335 s->avctx->spatial_cplx_masking ||
336 s->avctx->p_masking ||
337 s->avctx->border_masking ||
338 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
341 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* --- rate-control sanity checks --- */
343 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
344 av_log(avctx, AV_LOG_ERROR,
345 "a vbv buffer size is needed, "
346 "for encoding with a maximum bitrate\n");
350 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
351 av_log(avctx, AV_LOG_INFO,
352 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
355 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
356 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
360 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
361 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
365 if (avctx->rc_max_rate &&
366 avctx->rc_max_rate == avctx->bit_rate &&
367 avctx->rc_max_rate != avctx->rc_min_rate) {
368 av_log(avctx, AV_LOG_INFO,
369 "impossible bitrate constraints, this will fail\n");
372 if (avctx->rc_buffer_size &&
373 avctx->bit_rate * (int64_t)avctx->time_base.num >
374 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
375 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
379 if (!s->fixed_qscale &&
380 avctx->bit_rate * av_q2d(avctx->time_base) >
381 avctx->bit_rate_tolerance) {
382 av_log(avctx, AV_LOG_ERROR,
383 "bitrate tolerance too small for bitrate\n");
/* MPEG-1/2 vbv_delay is a 16-bit field in 90 kHz units; warn when the
 * requested CBR buffer cannot be represented. */
387 if (s->avctx->rc_max_rate &&
388 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
389 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
390 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
391 90000LL * (avctx->rc_buffer_size - 1) >
392 s->avctx->rc_max_rate * 0xFFFFLL) {
393 av_log(avctx, AV_LOG_INFO,
394 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
395 "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature/codec compatibility checks --- */
398 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
399 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
400 s->codec_id != AV_CODEC_ID_FLV1) {
401 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
405 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
406 av_log(avctx, AV_LOG_ERROR,
407 "OBMC is only supported with simple mb decision\n");
411 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
412 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
416 if (s->max_b_frames &&
417 s->codec_id != AV_CODEC_ID_MPEG4 &&
418 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
419 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
420 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
424 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
425 s->codec_id == AV_CODEC_ID_H263 ||
426 s->codec_id == AV_CODEC_ID_H263P) &&
427 (avctx->sample_aspect_ratio.num > 255 ||
428 avctx->sample_aspect_ratio.den > 255)) {
429 av_log(avctx, AV_LOG_ERROR,
430 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
431 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
435 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
436 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
437 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
441 // FIXME mpeg2 uses that too
442 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
443 av_log(avctx, AV_LOG_ERROR,
444 "mpeg2 style quantization not supported by codec\n");
448 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
449 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
453 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
454 s->avctx->mb_decision != FF_MB_DECISION_RD) {
455 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
459 if (s->avctx->scenechange_threshold < 1000000000 &&
460 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
461 av_log(avctx, AV_LOG_ERROR,
462 "closed gop with scene change detection are not supported yet, "
463 "set threshold to 1000000000\n");
467 if (s->flags & CODEC_FLAG_LOW_DELAY) {
468 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
469 av_log(avctx, AV_LOG_ERROR,
470 "low delay forcing is only available for mpeg2\n");
473 if (s->max_b_frames != 0) {
474 av_log(avctx, AV_LOG_ERROR,
475 "b frames cannot be used with low delay\n");
480 if (s->q_scale_type == 1) {
481 if (avctx->qmax > 12) {
482 av_log(avctx, AV_LOG_ERROR,
483 "non linear quant only supports qmax <= 12 currently\n");
488 if (s->avctx->thread_count > 1 &&
489 s->codec_id != AV_CODEC_ID_MPEG4 &&
490 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
491 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
492 (s->codec_id != AV_CODEC_ID_H263P)) {
493 av_log(avctx, AV_LOG_ERROR,
494 "multi threaded encoding not supported by codec\n");
498 if (s->avctx->thread_count < 1) {
499 av_log(avctx, AV_LOG_ERROR,
500 "automatic thread number detection not supported by codec,"
505 if (s->avctx->thread_count > 1)
508 if (!avctx->time_base.den || !avctx->time_base.num) {
509 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
513 i = (INT_MAX / 2 + 128) >> 8;
514 if (avctx->mb_threshold >= i) {
515 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
520 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
521 av_log(avctx, AV_LOG_INFO,
522 "notice: b_frame_strategy only affects the first pass\n");
523 avctx->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms. */
526 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
528 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
529 avctx->time_base.den /= i;
530 avctx->time_base.num /= i;
/* --- default quantizer rounding biases --- */
534 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
535 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
536 // (a + x * 3 / 8) / x
537 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
538 s->inter_quant_bias = 0;
540 s->intra_quant_bias = 0;
542 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
/* Explicit user-supplied biases override the defaults above. */
545 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
546 s->intra_quant_bias = avctx->intra_quant_bias;
547 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
548 s->inter_quant_bias = avctx->inter_quant_bias;
550 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
553 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
554 s->avctx->time_base.den > (1 << 16) - 1) {
555 av_log(avctx, AV_LOG_ERROR,
556 "timebase %d/%d not supported by MPEG 4 standard, "
557 "the maximum admitted value for the timebase denominator "
558 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
562 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature configuration --- */
564 switch (avctx->codec->id) {
565 case AV_CODEC_ID_MPEG1VIDEO:
566 s->out_format = FMT_MPEG1;
567 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
568 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
570 case AV_CODEC_ID_MPEG2VIDEO:
571 s->out_format = FMT_MPEG1;
572 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
573 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
576 case AV_CODEC_ID_LJPEG:
577 case AV_CODEC_ID_MJPEG:
578 s->out_format = FMT_MJPEG;
579 s->intra_only = 1; /* force intra only for jpeg */
580 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
581 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
582 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
583 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
584 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
/* Derive JPEG sampling factors from the chroma subsampling shifts. */
586 s->mjpeg_vsample[0] = 2;
587 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
588 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
589 s->mjpeg_hsample[0] = 2;
590 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
591 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
593 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
594 ff_mjpeg_encode_init(s) < 0)
599 case AV_CODEC_ID_H261:
600 if (!CONFIG_H261_ENCODER)
602 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
603 av_log(avctx, AV_LOG_ERROR,
604 "The specified picture size of %dx%d is not valid for the "
605 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
606 s->width, s->height);
609 s->out_format = FMT_H261;
613 case AV_CODEC_ID_H263:
614 if (!CONFIG_H263_ENCODER)
616 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
617 s->width, s->height) == 8) {
618 av_log(avctx, AV_LOG_INFO,
619 "The specified picture size of %dx%d is not valid for "
620 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
621 "352x288, 704x576, and 1408x1152."
622 "Try H.263+.\n", s->width, s->height);
625 s->out_format = FMT_H263;
629 case AV_CODEC_ID_H263P:
630 s->out_format = FMT_H263;
633 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
634 s->modified_quant = s->h263_aic;
635 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
636 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
639 /* These are just to be sure */
643 case AV_CODEC_ID_FLV1:
644 s->out_format = FMT_H263;
645 s->h263_flv = 2; /* format = 1; 11-bit codes */
646 s->unrestricted_mv = 1;
647 s->rtp_mode = 0; /* don't allow GOB */
651 case AV_CODEC_ID_RV10:
652 s->out_format = FMT_H263;
656 case AV_CODEC_ID_RV20:
657 s->out_format = FMT_H263;
660 s->modified_quant = 1;
664 s->unrestricted_mv = 0;
666 case AV_CODEC_ID_MPEG4:
667 s->out_format = FMT_H263;
669 s->unrestricted_mv = 1;
670 s->low_delay = s->max_b_frames ? 0 : 1;
671 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
673 case AV_CODEC_ID_MSMPEG4V2:
674 s->out_format = FMT_H263;
676 s->unrestricted_mv = 1;
677 s->msmpeg4_version = 2;
681 case AV_CODEC_ID_MSMPEG4V3:
682 s->out_format = FMT_H263;
684 s->unrestricted_mv = 1;
685 s->msmpeg4_version = 3;
686 s->flipflop_rounding = 1;
690 case AV_CODEC_ID_WMV1:
691 s->out_format = FMT_H263;
693 s->unrestricted_mv = 1;
694 s->msmpeg4_version = 4;
695 s->flipflop_rounding = 1;
699 case AV_CODEC_ID_WMV2:
700 s->out_format = FMT_H263;
702 s->unrestricted_mv = 1;
703 s->msmpeg4_version = 5;
704 s->flipflop_rounding = 1;
712 avctx->has_b_frames = !s->low_delay;
716 s->progressive_frame =
717 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
718 CODEC_FLAG_INTERLACED_ME) ||
/* --- common init, quantizer function selection, matrix setup --- */
722 if (ff_MPV_common_init(s) < 0)
726 ff_MPV_encode_init_x86(s);
728 if (!s->dct_quantize)
729 s->dct_quantize = ff_dct_quantize_c;
731 s->denoise_dct = denoise_dct_c;
732 s->fast_dct_quantize = s->dct_quantize;
734 s->dct_quantize = dct_quantize_trellis_c;
736 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
737 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
739 s->quant_precision = 5;
741 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
742 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
744 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
745 ff_h261_encode_init(s);
746 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
747 ff_h263_encode_init(s);
748 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
749 ff_msmpeg4_encode_init(s);
750 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
751 && s->out_format == FMT_MPEG1)
752 ff_mpeg1_encode_init(s);
/* Select the default intra/inter matrices (permuted for the idct),
 * then let user-supplied matrices override them. */
755 for (i = 0; i < 64; i++) {
756 int j = s->dsp.idct_permutation[i];
757 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
759 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
760 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
761 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
763 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
766 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
767 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
769 if (s->avctx->intra_matrix)
770 s->intra_matrix[j] = s->avctx->intra_matrix[i];
771 if (s->avctx->inter_matrix)
772 s->inter_matrix[j] = s->avctx->inter_matrix[i];
775 /* precompute matrix */
776 /* for mjpeg, we do include qscale in the matrix */
777 if (s->out_format != FMT_MJPEG) {
778 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
779 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
781 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
782 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
786 if (ff_rate_control_init(s) < 0)
/* Tear down the encoder: stop the rate controller, free the common
 * mpegvideo state, close the (M/L)JPEG sub-encoder if it was used,
 * and release the extradata buffer. */
792 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
794 MpegEncContext *s = avctx->priv_data;
796 ff_rate_control_uninit(s);
798 ff_MPV_common_end(s);
799 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
800 s->out_format == FMT_MJPEG)
801 ff_mjpeg_encode_close(s);
803 av_freep(&avctx->extradata);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value 'ref' (used with the block mean to measure flatness). */
808 static int get_sae(uint8_t *src, int ref, int stride)
813 for (y = 0; y < 16; y++) {
814 for (x = 0; x < 16; x++) {
815 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to code as intra: a block votes
 * "intra" when its deviation from its own mean (SAE + 500) is smaller
 * than its SAD against the reference frame. */
822 static int get_intra_count(MpegEncContext *s, uint8_t *src,
823 uint8_t *ref, int stride)
831 for (y = 0; y < h; y += 16) {
832 for (x = 0; x < w; x += 16) {
833 int offset = x + y * stride;
834 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
/* pix_sum over 256 pixels, +128 for rounding, >>8 gives the mean. */
836 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
837 int sae = get_sae(src + offset, mean, stride);
839 acc += sae + 500 < sad;
/* Accept one user frame into the encoder's reorder buffer: validate or
 * synthesize its pts, reference or copy the pixel data into an internal
 * Picture, and append it at index 'encoding_delay' of input_picture[]
 * after shifting the queue down by one. */
846 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
850 int i, display_picture_number = 0, ret;
851 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
852 (s->low_delay ? 0 : 1);
857 display_picture_number = s->input_picture_number++;
/* pts handling: user pts must be strictly increasing; otherwise guess. */
859 if (pts != AV_NOPTS_VALUE) {
860 if (s->user_specified_pts != AV_NOPTS_VALUE) {
862 int64_t last = s->user_specified_pts;
865 av_log(s->avctx, AV_LOG_ERROR,
866 "Error, Invalid timestamp=%"PRId64", "
867 "last=%"PRId64"\n", pts, s->user_specified_pts);
871 if (!s->low_delay && display_picture_number == 1)
872 s->dts_delta = time - last;
874 s->user_specified_pts = pts;
876 if (s->user_specified_pts != AV_NOPTS_VALUE) {
877 s->user_specified_pts =
878 pts = s->user_specified_pts + 1;
879 av_log(s->avctx, AV_LOG_INFO,
880 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
883 pts = display_picture_number;
/* Decide whether the input buffers can be referenced directly (strides
 * must match the encoder's); otherwise the data is copied below.
 * NOTE(review): the trailing ';' after this 'if' looks like a stray
 * semicolon (its controlled statement is missing from this excerpt) —
 * verify against the upstream source. */
889 if (!pic_arg->buf[0]);
891 if (pic_arg->linesize[0] != s->linesize)
893 if (pic_arg->linesize[1] != s->uvlinesize)
895 if (pic_arg->linesize[2] != s->uvlinesize)
898 av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
899 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* Direct path: reference the caller's frame. */
902 i = ff_find_unused_picture(s, 1);
906 pic = &s->picture[i];
909 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
911 if (ff_alloc_picture(s, pic, 1) < 0) {
/* Copy path: allocate an internal picture and copy plane by plane. */
915 i = ff_find_unused_picture(s, 0);
919 pic = &s->picture[i];
922 if (ff_alloc_picture(s, pic, 0) < 0) {
926 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
927 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
928 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
931 int h_chroma_shift, v_chroma_shift;
932 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
936 for (i = 0; i < 3; i++) {
937 int src_stride = pic_arg->linesize[i];
938 int dst_stride = i ? s->uvlinesize : s->linesize;
939 int h_shift = i ? h_chroma_shift : 0;
940 int v_shift = i ? v_chroma_shift : 0;
941 int w = s->width >> h_shift;
942 int h = s->height >> v_shift;
943 uint8_t *src = pic_arg->data[i];
944 uint8_t *dst = pic->f.data[i];
946 if (!s->avctx->rc_buffer_size)
947 dst += INPLACE_OFFSET;
949 if (src_stride == dst_stride)
950 memcpy(dst, src, src_stride * h);
961 copy_picture_attributes(s, &pic->f, pic_arg);
962 pic->f.display_picture_number = display_picture_number;
963 pic->f.pts = pts; // we set this here to avoid modifying pic_arg
966 /* shift buffer entries */
967 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
968 s->input_picture[i - 1] = s->input_picture[i];
970 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture p is similar enough to ref to be skipped:
 * accumulate a per-8x8-block comparison score over all three planes
 * (luma counts 2x2 blocks per MB, chroma 1x1) using the configured
 * frame_skip_cmp, folded according to frame_skip_exp, then test the
 * total against frame_skip_threshold and a lambda-scaled factor. */
975 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
981 for (plane = 0; plane < 3; plane++) {
982 const int stride = p->f.linesize[plane];
983 const int bw = plane ? 1 : 2;
984 for (y = 0; y < s->mb_height * bw; y++) {
985 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared input pictures carry an INPLACE-style offset of 16. */
986 int off = p->shared ? 0 : 16;
987 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
988 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
989 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
991 switch (s->avctx->frame_skip_exp) {
992 case 0: score = FFMAX(score, v); break;
993 case 1: score += FFABS(v); break;
994 case 2: score += v * v; break;
995 case 3: score64 += FFABS(v * v * (int64_t)v); break;
996 case 4: score64 += v * v * (int64_t)(v * v); break;
1005 if (score64 < s->avctx->frame_skip_threshold)
1007 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with the auxiliary codec context c (used by the
 * B-frame strategy estimator) and release the resulting packet;
 * the value of interest is the encoded size, not the data. */
1012 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1014 AVPacket pkt = { 0 };
1015 int ret, got_output;
1017 av_init_packet(&pkt);
1018 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1023 av_free_packet(&pkt);
/* b_frame_strategy == 2: pick the B-frame run length by brute force.
 * Downscale the queued input pictures by 'scale', encode each candidate
 * pattern (j B-frames between P-frames) with a scratch encoder, and
 * keep the count with the lowest rate-distortion cost (encoded size
 * weighted by lambda2 plus the PSNR error accumulated in c->error[]). */
1027 static int estimate_best_b_count(MpegEncContext *s)
1029 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1030 AVCodecContext *c = avcodec_alloc_context3(NULL);
1031 AVFrame input[FF_MAX_B_FRAMES + 2];
1032 const int scale = s->avctx->brd_scale;
1033 int i, j, out_size, p_lambda, b_lambda, lambda2;
1034 int64_t best_rd = INT64_MAX;
1035 int best_b_count = -1;
1037 assert(scale >= 0 && scale <= 3);
1040 //s->next_picture_ptr->quality;
1041 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1042 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1043 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1044 if (!b_lambda) // FIXME we should do this somewhere else
1045 b_lambda = p_lambda;
1046 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the relevant settings. */
1049 c->width = s->width >> scale;
1050 c->height = s->height >> scale;
1051 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1052 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1053 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1054 c->mb_decision = s->avctx->mb_decision;
1055 c->me_cmp = s->avctx->me_cmp;
1056 c->mb_cmp = s->avctx->mb_cmp;
1057 c->me_sub_cmp = s->avctx->me_sub_cmp;
1058 c->pix_fmt = AV_PIX_FMT_YUV420P;
1059 c->time_base = s->avctx->time_base;
1060 c->max_b_frames = s->max_b_frames;
1062 if (avcodec_open2(c, codec, NULL) < 0)
/* Build downscaled copies: slot 0 is the previous reference, slots
 * 1..max_b_frames+1 are the queued input pictures. */
1065 for (i = 0; i < s->max_b_frames + 2; i++) {
1066 int ysize = c->width * c->height;
1067 int csize = (c->width / 2) * (c->height / 2);
1068 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1069 s->next_picture_ptr;
1071 avcodec_get_frame_defaults(&input[i]);
1072 input[i].data[0] = av_malloc(ysize + 2 * csize);
1073 input[i].data[1] = input[i].data[0] + ysize;
1074 input[i].data[2] = input[i].data[1] + csize;
1075 input[i].linesize[0] = c->width;
1076 input[i].linesize[1] =
1077 input[i].linesize[2] = c->width / 2;
1079 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1080 pre_input = *pre_input_ptr;
1082 if (!pre_input.shared && i) {
1083 pre_input.f.data[0] += INPLACE_OFFSET;
1084 pre_input.f.data[1] += INPLACE_OFFSET;
1085 pre_input.f.data[2] += INPLACE_OFFSET;
1088 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1089 pre_input.f.data[0], pre_input.f.linesize[0],
1090 c->width, c->height);
1091 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1092 pre_input.f.data[1], pre_input.f.linesize[1],
1093 c->width >> 1, c->height >> 1);
1094 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1095 pre_input.f.data[2], pre_input.f.linesize[2],
1096 c->width >> 1, c->height >> 1);
/* Try each candidate B-frame count j and accumulate its RD cost. */
1100 for (j = 0; j < s->max_b_frames + 1; j++) {
1103 if (!s->input_picture[j])
1106 c->error[0] = c->error[1] = c->error[2] = 0;
1108 input[0].pict_type = AV_PICTURE_TYPE_I;
1109 input[0].quality = 1 * FF_QP2LAMBDA;
1111 out_size = encode_frame(c, &input[0]);
1113 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1115 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th frame (and the last) is a P-frame. */
1116 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1118 input[i + 1].pict_type = is_p ?
1119 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1120 input[i + 1].quality = is_p ? p_lambda : b_lambda;
1122 out_size = encode_frame(c, &input[i + 1]);
1124 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1127 /* get the delayed frames */
1129 out_size = encode_frame(c, NULL);
1130 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1133 rd += c->error[0] + c->error[1] + c->error[2];
1144 for (i = 0; i < s->max_b_frames + 2; i++) {
1145 av_freep(&input[i].data[0]);
1148 return best_b_count;
1151 static int select_input_picture(MpegEncContext *s)
1155 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1156 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1157 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1159 /* set next picture type & ordering */
1160 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1161 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1162 s->next_picture_ptr == NULL || s->intra_only) {
1163 s->reordered_input_picture[0] = s->input_picture[0];
1164 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1165 s->reordered_input_picture[0]->f.coded_picture_number =
1166 s->coded_picture_number++;
1170 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1171 if (s->picture_in_gop_number < s->gop_size &&
1172 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1173 // FIXME check that te gop check above is +-1 correct
1174 av_frame_unref(&s->input_picture[0]->f);
1177 ff_vbv_update(s, 0);
1183 if (s->flags & CODEC_FLAG_PASS2) {
1184 for (i = 0; i < s->max_b_frames + 1; i++) {
1185 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1187 if (pict_num >= s->rc_context.num_entries)
1189 if (!s->input_picture[i]) {
1190 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1194 s->input_picture[i]->f.pict_type =
1195 s->rc_context.entry[pict_num].new_pict_type;
1199 if (s->avctx->b_frame_strategy == 0) {
1200 b_frames = s->max_b_frames;
1201 while (b_frames && !s->input_picture[b_frames])
1203 } else if (s->avctx->b_frame_strategy == 1) {
1204 for (i = 1; i < s->max_b_frames + 1; i++) {
1205 if (s->input_picture[i] &&
1206 s->input_picture[i]->b_frame_score == 0) {
1207 s->input_picture[i]->b_frame_score =
1209 s->input_picture[i ]->f.data[0],
1210 s->input_picture[i - 1]->f.data[0],
1214 for (i = 0; i < s->max_b_frames + 1; i++) {
1215 if (s->input_picture[i] == NULL ||
1216 s->input_picture[i]->b_frame_score - 1 >
1217 s->mb_num / s->avctx->b_sensitivity)
1221 b_frames = FFMAX(0, i - 1);
1224 for (i = 0; i < b_frames + 1; i++) {
1225 s->input_picture[i]->b_frame_score = 0;
1227 } else if (s->avctx->b_frame_strategy == 2) {
1228 b_frames = estimate_best_b_count(s);
1230 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1236 for (i = b_frames - 1; i >= 0; i--) {
1237 int type = s->input_picture[i]->f.pict_type;
1238 if (type && type != AV_PICTURE_TYPE_B)
1241 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1242 b_frames == s->max_b_frames) {
1243 av_log(s->avctx, AV_LOG_ERROR,
1244 "warning, too many b frames in a row\n");
1247 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1248 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1249 s->gop_size > s->picture_in_gop_number) {
1250 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1252 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1254 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1258 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1259 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1262 s->reordered_input_picture[0] = s->input_picture[b_frames];
1263 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1264 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1265 s->reordered_input_picture[0]->f.coded_picture_number =
1266 s->coded_picture_number++;
1267 for (i = 0; i < b_frames; i++) {
1268 s->reordered_input_picture[i + 1] = s->input_picture[i];
1269 s->reordered_input_picture[i + 1]->f.pict_type =
1271 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1272 s->coded_picture_number++;
1277 if (s->reordered_input_picture[0]) {
1278 s->reordered_input_picture[0]->reference =
1279 s->reordered_input_picture[0]->f.pict_type !=
1280 AV_PICTURE_TYPE_B ? 3 : 0;
1282 ff_mpeg_unref_picture(s, &s->new_picture);
1283 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1286 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1287 // input is a shared pix, so we can't modifiy it -> alloc a new
1288 // one & ensure that the shared one is reuseable
1291 int i = ff_find_unused_picture(s, 0);
1294 pic = &s->picture[i];
1296 pic->reference = s->reordered_input_picture[0]->reference;
1297 if (ff_alloc_picture(s, pic, 0) < 0) {
1301 copy_picture_attributes(s, &pic->f,
1302 &s->reordered_input_picture[0]->f);
1304 /* mark us unused / free shared pic */
1305 av_frame_unref(&s->reordered_input_picture[0]->f);
1306 s->reordered_input_picture[0]->shared = 0;
1308 s->current_picture_ptr = pic;
1310 // input is not a shared pix -> reuse buffer for current_pix
1311 s->current_picture_ptr = s->reordered_input_picture[0];
1312 for (i = 0; i < 4; i++) {
1313 s->new_picture.f.data[i] += INPLACE_OFFSET;
1316 ff_mpeg_unref_picture(s, &s->current_picture);
1317 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1318 s->current_picture_ptr)) < 0)
1321 s->picture_number = s->new_picture.f.display_picture_number;
1323 ff_mpeg_unref_picture(s, &s->new_picture);
/*
 * Top-level per-frame encode entry point.
 * Loads and reorders the input frame, encodes the selected picture
 * (re-encoding at a higher lambda on VBV overflow), writes stuffing bits,
 * patches the MPEG-1/2 vbv_delay header field for CBR, updates rate-control
 * and timestamp state, and finalizes the output packet.
 * *got_packet is set non-zero when a packet was produced.
 * NOTE(review): several statements of this function are not visible in this
 * excerpt; comments below describe only the visible code.
 */
1328 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1329 const AVFrame *pic_arg, int *got_packet)
1331 MpegEncContext *s = avctx->priv_data;
1332 int i, stuffing_count, ret;
1333 int context_count = s->slice_context_count;
1335 s->picture_in_gop_number++;
1337 if (load_input_picture(s, pic_arg) < 0)
1340 if (select_input_picture(s) < 0) {
/* a frame was selected for coding in this call */
1345 if (s->new_picture.f.data[0]) {
1347 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* 12 bytes of H.263 macroblock-info side data per MB (filled by write_mb_info()) */
1350 s->mb_info_ptr = av_packet_new_side_data(pkt,
1351 AV_PKT_DATA_H263_MB_INFO,
1352 s->mb_width*s->mb_height*12);
1353 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* give each slice thread a share of the packet buffer proportional to its MB rows */
1356 for (i = 0; i < context_count; i++) {
1357 int start_y = s->thread_context[i]->start_mb_y;
1358 int end_y = s->thread_context[i]-> end_mb_y;
1359 int h = s->mb_height;
1360 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1361 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1363 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1366 s->pict_type = s->new_picture.f.pict_type;
1368 ff_MPV_frame_start(s, avctx);
1370 if (encode_picture(s, s->picture_number) < 0)
/* export per-frame bit statistics to the AVCodecContext */
1373 avctx->header_bits = s->header_bits;
1374 avctx->mv_bits = s->mv_bits;
1375 avctx->misc_bits = s->misc_bits;
1376 avctx->i_tex_bits = s->i_tex_bits;
1377 avctx->p_tex_bits = s->p_tex_bits;
1378 avctx->i_count = s->i_count;
1379 // FIXME f/b_count in avctx
1380 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1381 avctx->skip_count = s->skip_count;
1383 ff_MPV_frame_end(s);
1385 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1386 ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow check: if the coded frame exceeds the allowed size and
 * lambda can still grow, raise lambda, undo side effects of the first
 * encode pass, and (in elided code) retry encoding the frame. */
1388 if (avctx->rc_buffer_size) {
1389 RateControlContext *rcc = &s->rc_context;
1390 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1392 if (put_bits_count(&s->pb) > max_size &&
1393 s->lambda < s->avctx->lmax) {
1394 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1395 (s->qscale + 1) / s->qscale);
1396 if (s->adaptive_quant) {
1398 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1399 s->lambda_table[i] =
1400 FFMAX(s->lambda_table[i] + 1,
1401 s->lambda_table[i] * (s->qscale + 1) /
1404 s->mb_skipped = 0; // done in MPV_frame_start()
1405 // done in encode_picture() so we must undo it
1406 if (s->pict_type == AV_PICTURE_TYPE_P) {
1407 if (s->flipflop_rounding ||
1408 s->codec_id == AV_CODEC_ID_H263P ||
1409 s->codec_id == AV_CODEC_ID_MPEG4)
1410 s->no_rounding ^= 1;
1412 if (s->pict_type != AV_PICTURE_TYPE_B) {
1413 s->time_base = s->last_time_base;
1414 s->last_non_b_time = s->time - s->pp_time;
/* rewind every slice thread's bitstream writer for the retry */
1416 for (i = 0; i < context_count; i++) {
1417 PutBitContext *pb = &s->thread_context[i]->pb;
1418 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1423 assert(s->avctx->rc_max_rate);
1426 if (s->flags & CODEC_FLAG_PASS1)
1427 ff_write_pass1_stats(s);
1429 for (i = 0; i < 4; i++) {
1430 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1431 avctx->error[i] += s->current_picture_ptr->f.error[i];
1434 if (s->flags & CODEC_FLAG_PASS1)
1435 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1436 avctx->i_tex_bits + avctx->p_tex_bits ==
1437 put_bits_count(&s->pb));
1438 flush_put_bits(&s->pb);
1439 s->frame_bits = put_bits_count(&s->pb);
/* append codec-specific stuffing if rate control asks for padding */
1441 stuffing_count = ff_vbv_update(s, s->frame_bits);
1442 if (stuffing_count) {
1443 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1444 stuffing_count + 50) {
1445 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1449 switch (s->codec_id) {
1450 case AV_CODEC_ID_MPEG1VIDEO:
1451 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: stuffing is plain zero bytes */
1452 while (stuffing_count--) {
1453 put_bits(&s->pb, 8, 0);
1456 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing start code 0x000001C3 followed by 0xFF bytes */
1457 put_bits(&s->pb, 16, 0);
1458 put_bits(&s->pb, 16, 0x1C3);
1459 stuffing_count -= 4;
1460 while (stuffing_count--) {
1461 put_bits(&s->pb, 8, 0xFF);
1465 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1467 flush_put_bits(&s->pb);
1468 s->frame_bits = put_bits_count(&s->pb);
1471 /* update mpeg1/2 vbv_delay for CBR */
1472 if (s->avctx->rc_max_rate &&
1473 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1474 s->out_format == FMT_MPEG1 &&
1475 90000LL * (avctx->rc_buffer_size - 1) <=
1476 s->avctx->rc_max_rate * 0xFFFFLL) {
1477 int vbv_delay, min_delay;
1478 double inbits = s->avctx->rc_max_rate *
1479 av_q2d(s->avctx->time_base);
1480 int minbits = s->frame_bits - 8 *
1481 (s->vbv_delay_ptr - s->pb.buf - 1);
1482 double bits = s->rc_context.buffer_index + minbits - inbits;
1485 av_log(s->avctx, AV_LOG_ERROR,
1486 "Internal error, negative bits\n");
1488 assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1490 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1491 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1492 s->avctx->rc_max_rate;
1494 vbv_delay = FFMAX(vbv_delay, min_delay);
1496 assert(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field, which straddles three header bytes */
1498 s->vbv_delay_ptr[0] &= 0xF8;
1499 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1500 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1501 s->vbv_delay_ptr[2] &= 0x07;
1502 s->vbv_delay_ptr[2] |= vbv_delay << 3;
/* convert from 90 kHz ticks to the 27 MHz clock used by AVCodecContext */
1503 avctx->vbv_delay = vbv_delay * 300;
1505 s->total_bits += s->frame_bits;
1506 avctx->frame_bits = s->frame_bits;
/* timestamps: with frame reordering (B-frames) dts lags pts */
1508 pkt->pts = s->current_picture.f.pts;
1509 if (!s->low_delay) {
1510 if (!s->current_picture.f.coded_picture_number)
1511 pkt->dts = pkt->pts - s->dts_delta;
1513 pkt->dts = s->reordered_pts;
1514 s->reordered_pts = s->input_picture[0]->f.pts;
1516 pkt->dts = pkt->pts;
1517 if (s->current_picture.f.key_frame)
1518 pkt->flags |= AV_PKT_FLAG_KEY;
/* trim the MB-info side data to the amount actually written */
1520 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1524 assert((s->frame_bits & 7) == 0);
1526 pkt->size = s->frame_bits / 8;
1527 *got_packet = !!pkt->size;
/*
 * Zero out block n when its (non-DC) energy is negligible.
 * A significance score is accumulated over the scanned coefficients,
 * weighted by the position table 'tab' (only low-frequency positions have
 * non-zero weight); if the score stays below 'threshold' the block is
 * cleared, except possibly the DC coefficient.
 * @param n         index of the block inside s->block[]
 * @param threshold elimination threshold; a negative value is negated
 *                  (presumably also enabling skip_dc — the assignment is
 *                  not visible in this excerpt; confirm in full source)
 */
1531 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1532 int n, int threshold)
/* per-scan-position weights; positions past the first 3 rows count 0 */
1534 static const char tab[64] = {
1535 3, 2, 2, 1, 1, 1, 1, 1,
1536 1, 1, 1, 1, 1, 1, 1, 1,
1537 1, 1, 1, 1, 1, 1, 1, 1,
1538 0, 0, 0, 0, 0, 0, 0, 0,
1539 0, 0, 0, 0, 0, 0, 0, 0,
1540 0, 0, 0, 0, 0, 0, 0, 0,
1541 0, 0, 0, 0, 0, 0, 0, 0,
1542 0, 0, 0, 0, 0, 0, 0, 0
1547 int16_t *block = s->block[n];
1548 const int last_index = s->block_last_index[n];
1551 if (threshold < 0) {
1553 threshold = -threshold;
1557 /* Are all we could set to zero already zero? */
1558 if (last_index <= skip_dc - 1)
/* accumulate the significance score over the scan order */
1561 for (i = 0; i <= last_index; i++) {
1562 const int j = s->intra_scantable.permutated[i];
1563 const int level = FFABS(block[j]);
1565 if (skip_dc && i == 0)
1569 } else if (level > 1) {
/* block is significant enough: keep it untouched */
1575 if (score >= threshold)
/* otherwise clear all coefficients from skip_dc onwards */
1577 for (i = skip_dc; i <= last_index; i++) {
1578 const int j = s->intra_scantable.permutated[i];
/* last_index 0 if DC survives, -1 if the block is entirely empty */
1582 s->block_last_index[n] = 0;
1584 s->block_last_index[n] = -1;
/*
 * Clamp quantized coefficients of one block into the codec's legal range
 * [s->min_qcoeff, s->max_qcoeff], counting how many overflowed, and log a
 * warning with the overflow count when using simple MB decision.
 * The intra DC coefficient is never clipped (loop starts at i = 1 in the
 * visible path).
 */
1587 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1591 const int maxlevel = s->max_qcoeff;
1592 const int minlevel = s->min_qcoeff;
1596 i = 1; // skip clipping of intra dc
1600 for (; i <= last_index; i++) {
1601 const int j = s->intra_scantable.permutated[i];
1602 int level = block[j];
1604 if (level > maxlevel) {
1607 } else if (level < minlevel) {
/* NOTE(review): warning limited to FF_MB_DECISION_SIMPLE — presumably the
 * RD decision modes handle overflow themselves; confirm in full source */
1615 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1616 av_log(s->avctx, AV_LOG_INFO,
1617 "warning, clipping %d dct coefficients to %d..%d\n",
1618 overflow, minlevel, maxlevel);
/*
 * Compute a perceptual weight for each pixel of an 8x8 block, used by the
 * quantizer noise shaping. Each weight is proportional to the standard
 * deviation of the pixel's 3x3 neighbourhood (clipped at block borders),
 * so flat areas — where quantization noise is most visible — get treated
 * differently from textured ones.
 */
1621 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1625 for (y = 0; y < 8; y++) {
1626 for (x = 0; x < 8; x++) {
/* gather sum / sum-of-squares over the clipped 3x3 neighbourhood */
1632 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1633 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1634 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(count*sqr - sum^2) / count  ~  scaled std deviation */
1640 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/*
 * Encode one macroblock: fetch (intra) or motion-compensate and diff
 * (inter) the pixels, decide interlaced vs progressive DCT, DCT+quantize
 * all blocks, optionally apply noise shaping and coefficient elimination,
 * then entropy-code with the codec-specific encoder.
 * @param motion_x/motion_y  MV used by the entropy coders
 * @param mb_block_height    chroma block height (8 for 4:2:0, 16 for 4:2:2)
 * (the mb_block_count parameter — 6 or 8 — is on an elided line)
 * NOTE(review): many statements of this function are not visible in this
 * excerpt; comments describe only the visible code.
 */
1645 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1646 int motion_x, int motion_y,
1647 int mb_block_height,
1650 int16_t weight[8][64];
1651 int16_t orig[8][64];
1652 const int mb_x = s->mb_x;
1653 const int mb_y = s->mb_y;
1656 int dct_offset = s->linesize * 8; // default for progressive frames
1657 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1660 for (i = 0; i < mb_block_count; i++)
1661 skip_dct[i] = s->skipdct;
/* adaptive quantization: pick this MB's lambda/qscale from the tables */
1663 if (s->adaptive_quant) {
1664 const int last_qp = s->qscale;
1665 const int mb_xy = mb_x + mb_y * s->mb_stride;
1667 s->lambda = s->lambda_table[mb_xy];
1670 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1671 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1672 s->dquant = s->qscale - last_qp;
/* H.263-family syntax limits the per-MB qscale delta to +-2 */
1674 if (s->out_format == FMT_H263) {
1675 s->dquant = av_clip(s->dquant, -2, 2);
1677 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1679 if (s->pict_type == AV_PICTURE_TYPE_B) {
1680 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1683 if (s->mv_type == MV_TYPE_8X8)
1689 ff_set_qscale(s, last_qp + s->dquant);
1690 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1691 ff_set_qscale(s, s->qscale + s->dquant);
/* source pointers into the frame to be encoded */
1693 wrap_y = s->linesize;
1694 wrap_c = s->uvlinesize;
1695 ptr_y = s->new_picture.f.data[0] +
1696 (mb_y * 16 * wrap_y) + mb_x * 16;
1697 ptr_cb = s->new_picture.f.data[1] +
1698 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1699 ptr_cr = s->new_picture.f.data[2] +
1700 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB sticks out past the picture edge: replicate pixels into the
 * edge emulation buffer and read from there instead */
1702 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1703 uint8_t *ebuf = s->edge_emu_buffer + 32;
1704 s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1705 mb_y * 16, s->width, s->height);
1707 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1708 mb_block_height, mb_x * 8, mb_y * 8,
1709 s->width >> 1, s->height >> 1);
1710 ptr_cb = ebuf + 18 * wrap_y;
1711 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1712 mb_block_height, mb_x * 8, mb_y * 8,
1713 s->width >> 1, s->height >> 1);
1714 ptr_cr = ebuf + 18 * wrap_y + 8;
/* intra path: decide progressive vs interlaced DCT by comparing the
 * ildct_cmp score of frame rows vs field rows (-400 bias favors
 * progressive) */
1718 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1719 int progressive_score, interlaced_score;
1721 s->interlaced_dct = 0;
1722 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1724 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1725 NULL, wrap_y, 8) - 400;
1727 if (progressive_score > 0) {
1728 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1729 NULL, wrap_y * 2, 8) +
1730 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1731 NULL, wrap_y * 2, 8);
1732 if (progressive_score > interlaced_score) {
1733 s->interlaced_dct = 1;
/* field DCT: lower luma blocks start one line down, double stride */
1735 dct_offset = wrap_y;
1737 if (s->chroma_format == CHROMA_422)
/* fetch the four 8x8 luma blocks */
1743 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1744 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1745 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1746 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1748 if (s->flags & CODEC_FLAG_GRAY) {
1752 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1753 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1754 if (!s->chroma_y_shift) { /* 422 */
1755 s->dsp.get_pixels(s->block[6],
1756 ptr_cb + (dct_offset >> 1), wrap_c);
1757 s->dsp.get_pixels(s->block[7],
1758 ptr_cr + (dct_offset >> 1), wrap_c);
/* inter path: motion compensate into s->dest, then encode residual */
1762 op_pixels_func (*op_pix)[4];
1763 qpel_mc_func (*op_qpix)[16];
1764 uint8_t *dest_y, *dest_cb, *dest_cr;
1766 dest_y = s->dest[0];
1767 dest_cb = s->dest[1];
1768 dest_cr = s->dest[2];
/* rounding choice alternates on P-frames (no_rounding); B-frames
 * always use rounded interpolation */
1770 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1771 op_pix = s->hdsp.put_pixels_tab;
1772 op_qpix = s->dsp.put_qpel_pixels_tab;
1774 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1775 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1778 if (s->mv_dir & MV_DIR_FORWARD) {
1779 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1780 s->last_picture.f.data,
/* bidirectional: the backward prediction is averaged on top */
1782 op_pix = s->hdsp.avg_pixels_tab;
1783 op_qpix = s->dsp.avg_qpel_pixels_tab;
1785 if (s->mv_dir & MV_DIR_BACKWARD) {
1786 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1787 s->next_picture.f.data,
/* interlaced-DCT decision for the residual (source vs prediction) */
1791 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1792 int progressive_score, interlaced_score;
1794 s->interlaced_dct = 0;
1795 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1798 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1799 ptr_y + wrap_y * 8, wrap_y,
1802 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1803 progressive_score -= 400;
1805 if (progressive_score > 0) {
1806 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1809 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1813 if (progressive_score > interlaced_score) {
1814 s->interlaced_dct = 1;
1816 dct_offset = wrap_y;
1818 if (s->chroma_format == CHROMA_422)
/* residual = source - prediction, per 8x8 block */
1824 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1825 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1826 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1827 dest_y + dct_offset, wrap_y);
1828 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1829 dest_y + dct_offset + 8, wrap_y);
1831 if (s->flags & CODEC_FLAG_GRAY) {
1835 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1836 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1837 if (!s->chroma_y_shift) { /* 422 */
1838 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1839 dest_cb + (dct_offset >> 1), wrap_c);
1840 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1841 dest_cr + (dct_offset >> 1), wrap_c);
1844 /* pre quantization: skip the DCT of blocks whose SAD against the
* prediction is small relative to qscale (the skip assignments are on
* elided lines) */
1845 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1846 2 * s->qscale * s->qscale) {
1848 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1849 wrap_y, 8) < 20 * s->qscale)
1851 if (s->dsp.sad[1](NULL, ptr_y + 8,
1852 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1854 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1855 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1857 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1858 dest_y + dct_offset + 8,
1859 wrap_y, 8) < 20 * s->qscale)
1861 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1862 wrap_c, 8) < 20 * s->qscale)
1864 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1865 wrap_c, 8) < 20 * s->qscale)
1867 if (!s->chroma_y_shift) { /* 422 */
1868 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1869 dest_cb + (dct_offset >> 1),
1870 wrap_c, 8) < 20 * s->qscale)
1872 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1873 dest_cr + (dct_offset >> 1),
1874 wrap_c, 8) < 20 * s->qscale)
/* noise shaping: compute perceptual weights and keep the original
 * coefficients for the refinement pass below */
1880 if (s->quantizer_noise_shaping) {
1882 get_visual_weight(weight[0], ptr_y , wrap_y);
1884 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1886 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1888 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1890 get_visual_weight(weight[4], ptr_cb , wrap_c);
1892 get_visual_weight(weight[5], ptr_cr , wrap_c);
1893 if (!s->chroma_y_shift) { /* 422 */
1895 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1898 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1901 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1904 /* DCT & quantize */
1905 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1907 for (i = 0; i < mb_block_count; i++) {
1910 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1911 // FIXME we could decide to change to quantizer instead of
1913 // JS: I don't think that would be a good idea it could lower
1914 // quality instead of improve it. Just INTRADC clipping
1915 // deserves changes in quantizer
1917 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1919 s->block_last_index[i] = -1;
1921 if (s->quantizer_noise_shaping) {
1922 for (i = 0; i < mb_block_count; i++) {
1924 s->block_last_index[i] =
1925 dct_quantize_refine(s, s->block[i], weight[i],
1926 orig[i], i, s->qscale);
/* drop inter blocks with negligible coefficient energy */
1931 if (s->luma_elim_threshold && !s->mb_intra)
1932 for (i = 0; i < 4; i++)
1933 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1934 if (s->chroma_elim_threshold && !s->mb_intra)
1935 for (i = 4; i < mb_block_count; i++)
1936 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1938 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1939 for (i = 0; i < mb_block_count; i++) {
1940 if (s->block_last_index[i] == -1)
1941 s->coded_score[i] = INT_MAX / 256;
/* grayscale intra: force mid-gray chroma DC, no AC coefficients */
1946 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1947 s->block_last_index[4] =
1948 s->block_last_index[5] = 0;
1950 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1953 // non c quantize code returns incorrect block_last_index FIXME
1954 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1955 for (i = 0; i < mb_block_count; i++) {
1957 if (s->block_last_index[i] > 0) {
/* rescan backwards for the true last non-zero coefficient */
1958 for (j = 63; j > 0; j--) {
1959 if (s->block[i][s->intra_scantable.permutated[j]])
1962 s->block_last_index[i] = j;
1967 /* huffman encode */
1968 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1969 case AV_CODEC_ID_MPEG1VIDEO:
1970 case AV_CODEC_ID_MPEG2VIDEO:
1971 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1972 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1974 case AV_CODEC_ID_MPEG4:
1975 if (CONFIG_MPEG4_ENCODER)
1976 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1978 case AV_CODEC_ID_MSMPEG4V2:
1979 case AV_CODEC_ID_MSMPEG4V3:
1980 case AV_CODEC_ID_WMV1:
1981 if (CONFIG_MSMPEG4_ENCODER)
1982 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1984 case AV_CODEC_ID_WMV2:
1985 if (CONFIG_WMV2_ENCODER)
1986 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1988 case AV_CODEC_ID_H261:
1989 if (CONFIG_H261_ENCODER)
1990 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1992 case AV_CODEC_ID_H263:
1993 case AV_CODEC_ID_H263P:
1994 case AV_CODEC_ID_FLV1:
1995 case AV_CODEC_ID_RV10:
1996 case AV_CODEC_ID_RV20:
1997 if (CONFIG_H263_ENCODER)
1998 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2000 case AV_CODEC_ID_MJPEG:
2001 if (CONFIG_MJPEG_ENCODER)
2002 ff_mjpeg_encode_mb(s, s->block);
/* Dispatch to encode_mb_internal() with the chroma layout:
 * 4:2:0 -> chroma block height 8, 6 blocks/MB; otherwise 16 and 8. */
2009 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2011 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2012 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/*
 * Snapshot into d the encoder state that coding one macroblock may modify
 * (predictors, skip run, statistics, qscale), so a candidate coding can be
 * attempted in s and rolled back from the backup afterwards.
 * Counterpart of copy_context_after_encode().
 */
2015 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MV predictors */
2018 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2021 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header is on an elided line) */
2023 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2026 d->mv_bits= s->mv_bits;
2027 d->i_tex_bits= s->i_tex_bits;
2028 d->p_tex_bits= s->p_tex_bits;
2029 d->i_count= s->i_count;
2030 d->f_count= s->f_count;
2031 d->b_count= s->b_count;
2032 d->skip_count= s->skip_count;
2033 d->misc_bits= s->misc_bits;
2037 d->qscale= s->qscale;
2038 d->dquant= s->dquant;
2040 d->esc3_level_length= s->esc3_level_length;
/*
 * Copy into d the state produced by encoding one macroblock in s; used to
 * commit the winning candidate of an RD macroblock-mode decision.
 * Mirrors copy_context_before_encode() but additionally carries the
 * per-MB coding decisions (intra/skip/mv_type/mv_dir, block indexes,
 * interlaced_dct) and the partitioned bitstream writers.
 */
2043 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MVs and MV predictors */
2046 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2047 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2050 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header is on an elided line) */
2052 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2055 d->mv_bits= s->mv_bits;
2056 d->i_tex_bits= s->i_tex_bits;
2057 d->p_tex_bits= s->p_tex_bits;
2058 d->i_count= s->i_count;
2059 d->f_count= s->f_count;
2060 d->b_count= s->b_count;
2061 d->skip_count= s->skip_count;
2062 d->misc_bits= s->misc_bits;
/* per-MB coding decisions */
2064 d->mb_intra= s->mb_intra;
2065 d->mb_skipped= s->mb_skipped;
2066 d->mv_type= s->mv_type;
2067 d->mv_dir= s->mv_dir;
2069 if(s->data_partitioning){
2071 d->tex_pb= s->tex_pb;
2075 d->block_last_index[i]= s->block_last_index[i];
2076 d->interlaced_dct= s->interlaced_dct;
2077 d->qscale= s->qscale;
2079 d->esc3_level_length= s->esc3_level_length;
/*
 * Try coding the current macroblock as mode 'type': restore the pre-encode
 * state from 'backup', encode into the alternate block/bitstream buffers
 * selected by *next_block, score the attempt (bit count, or full RD cost
 * lambda2*bits + SSE when mb_decision is RD), and — in elided code —
 * commit the attempt to 'best' and update *dmin when it wins.
 * Reconstruction is redirected into rd_scratchpad so the real picture is
 * not modified by losing candidates.
 */
2082 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2083 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2084 int *dmin, int *next_block, int motion_x, int motion_y)
2087 uint8_t *dest_backup[3];
2089 copy_context_before_encode(s, backup, type);
/* double-buffered block/bitstream storage so best and current coexist */
2091 s->block= s->blocks[*next_block];
2092 s->pb= pb[*next_block];
2093 if(s->data_partitioning){
2094 s->pb2 = pb2 [*next_block];
2095 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the scratchpad */
2099 memcpy(dest_backup, s->dest, sizeof(s->dest));
2100 s->dest[0] = s->rd_scratchpad;
2101 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2102 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2103 assert(s->linesize >= 32); //FIXME
2106 encode_mb(s, motion_x, motion_y);
/* base score: bits used by this candidate */
2108 score= put_bits_count(&s->pb);
2109 if(s->data_partitioning){
2110 score+= put_bits_count(&s->pb2);
2111 score+= put_bits_count(&s->tex_pb);
/* full RD: decode the MB and add the distortion term */
2114 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2115 ff_MPV_decode_mb(s, s->block);
2117 score *= s->lambda2;
2118 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2122 memcpy(s->dest, dest_backup, sizeof(s->dest));
2129 copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two w x h pixel regions.
 * The common 16x16 and 8x8 sizes go through the optimized dsp routines;
 * other sizes (partial edge macroblocks) use a scalar loop over the
 * square lookup table.
 */
2133 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* +256 so the table can be indexed with a signed difference */
2134 uint32_t *sq = ff_squareTbl + 256;
2139 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2140 else if(w==8 && h==8)
2141 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2145 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * Distortion of the current macroblock: compare the reconstructed MB in
 * s->dest against the source picture, over luma and both chroma planes.
 * Full-size MBs use the dsp sse (or nsse, if that is the configured mb_cmp)
 * routines; MBs clipped by the picture border fall back to the generic
 * sse() helper with the clipped width/height.
 */
2154 static int sse_mb(MpegEncContext *s){
/* clip MB dimensions at the right/bottom picture edge */
2158 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2159 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2162 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2163 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2164 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2165 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2167 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2168 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2169 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* edge MB: generic SSE over the clipped region (chroma at half size) */
2172 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2173 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2174 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Slice-thread worker: coarse pre-pass motion estimation over this
 * thread's MB rows, iterating bottom-up / right-to-left.
 * 'arg' is a pointer to the thread's MpegEncContext.
 */
2177 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2178 MpegEncContext *s= *(void**)arg;
/* pre-pass uses its own (usually smaller) diamond size */
2182 s->me.dia_size= s->avctx->pre_dia_size;
2183 s->first_slice_line=1;
2184 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2185 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2186 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2188 s->first_slice_line=0;
/*
 * Slice-thread worker: full motion estimation over this thread's MB rows.
 * Computes MV and mb_type per macroblock and stores them in the context;
 * B-frames use the bidirectional estimator, other frame types the P one.
 */
2196 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2197 MpegEncContext *s= *(void**)arg;
2199 ff_check_alignment();
2201 s->me.dia_size= s->avctx->dia_size;
2202 s->first_slice_line=1;
2203 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2204 s->mb_x=0; //for block init below
2205 ff_init_block_index(s);
2206 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the per-block indexes by one MB (2 luma blocks wide) */
2207 s->block_index[0]+=2;
2208 s->block_index[1]+=2;
2209 s->block_index[2]+=2;
2210 s->block_index[3]+=2;
2212 /* compute motion vector & mb_type and store in context */
2213 if(s->pict_type==AV_PICTURE_TYPE_B)
2214 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2216 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2218 s->first_slice_line=0;
/*
 * Slice-thread worker: compute per-macroblock luma variance and mean over
 * this thread's MB rows, filling current_picture.mb_var / mb_mean and
 * accumulating the variance sum for rate control.
 */
2223 static int mb_var_thread(AVCodecContext *c, void *arg){
2224 MpegEncContext *s= *(void**)arg;
2227 ff_check_alignment();
2229 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2230 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2233 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2235 int sum = s->dsp.pix_sum(pix, s->linesize);
/* variance = E[x^2] - E[x]^2, with rounding; >>8 because 16x16 = 256 px */
2237 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2239 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2240 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2241 s->me.mb_var_sum_temp += varc;
/*
 * Finish the current slice: merge MPEG-4 data partitions and write the
 * codec-specific stuffing, byte-align and flush the bitstream, and account
 * the alignment bits as misc_bits for two-pass statistics.
 */
2247 static void write_slice_end(MpegEncContext *s){
2248 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2249 if(s->partitioned_frame){
2250 ff_mpeg4_merge_partitions(s);
2253 ff_mpeg4_stuffing(&s->pb);
2254 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2255 ff_mjpeg_encode_stuffing(&s->pb);
/* byte-align so the next slice starts on a byte boundary */
2258 avpriv_align_put_bits(&s->pb);
2259 flush_put_bits(&s->pb);
2261 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2262 s->misc_bits+= get_bits_diff(s);
/*
 * Fill the most recently reserved 12-byte AV_PKT_DATA_H263_MB_INFO side
 * data slot with the current position and H.263 resync state:
 *   le32 bit offset, u8 qscale, u8 GOB number, le16 MB address within the
 *   GOB, u8 hmv1, u8 vmv1, u8 hmv2, u8 vmv2 (the second MV pair is always
 *   0 since 4MV is not implemented).
 */
2265 static void write_mb_info(MpegEncContext *s)
/* point at the slot reserved by update_mb_info() */
2267 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2268 int offset = put_bits_count(&s->pb);
2269 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2270 int gobn = s->mb_y / s->gob_index;
2272 if (CONFIG_H263_ENCODER)
2273 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2274 bytestream_put_le32(&ptr, offset);
2275 bytestream_put_byte(&ptr, s->qscale);
2276 bytestream_put_byte(&ptr, gobn);
2277 bytestream_put_le16(&ptr, mba);
2278 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2279 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2280 /* 4MV not implemented */
2281 bytestream_put_byte(&ptr, 0); /* hmv2 */
2282 bytestream_put_byte(&ptr, 0); /* vmv2 */
/*
 * Bookkeeping for the H.263 MB-info side data: reserve a new 12-byte slot
 * once enough bits have been written since the last entry, and record the
 * byte position after a start code so write_mb_info() (called from elided
 * code) can fill the slot.
 * @param startcode non-zero when called right after writing a start code
 */
2285 static void update_mb_info(MpegEncContext *s, int startcode)
/* enough bits since the last entry? reserve the next 12-byte slot */
2289 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2290 s->mb_info_size += 12;
2291 s->prev_mb_info = s->last_mb_info;
2294 s->prev_mb_info = put_bits_count(&s->pb)/8;
2295 /* This might have incremented mb_info_size above, and we return without
2296 * actually writing any info into that slot yet. But in that case,
2297 * this will be called again at the start of the next call after writing
2298 * the start code, actually writing the mb info. */
2302 s->last_mb_info = put_bits_count(&s->pb)/8;
2303 if (!s->mb_info_size)
2304 s->mb_info_size += 12;
2308 static int encode_thread(AVCodecContext *c, void *arg){
2309 MpegEncContext *s= *(void**)arg;
2310 int mb_x, mb_y, pdif = 0;
2311 int chr_h= 16>>s->chroma_y_shift;
2313 MpegEncContext best_s, backup_s;
2314 uint8_t bit_buf[2][MAX_MB_BYTES];
2315 uint8_t bit_buf2[2][MAX_MB_BYTES];
2316 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2317 PutBitContext pb[2], pb2[2], tex_pb[2];
2319 ff_check_alignment();
2322 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2323 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2324 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2327 s->last_bits= put_bits_count(&s->pb);
2338 /* init last dc values */
2339 /* note: quant matrix value (8) is implied here */
2340 s->last_dc[i] = 128 << s->intra_dc_precision;
2342 s->current_picture.f.error[i] = 0;
2345 memset(s->last_mv, 0, sizeof(s->last_mv));
2349 switch(s->codec_id){
2350 case AV_CODEC_ID_H263:
2351 case AV_CODEC_ID_H263P:
2352 case AV_CODEC_ID_FLV1:
2353 if (CONFIG_H263_ENCODER)
2354 s->gob_index = ff_h263_get_gob_height(s);
2356 case AV_CODEC_ID_MPEG4:
2357 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2358 ff_mpeg4_init_partitions(s);
2364 s->first_slice_line = 1;
2365 s->ptr_lastgob = s->pb.buf;
2366 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2370 ff_set_qscale(s, s->qscale);
2371 ff_init_block_index(s);
2373 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2374 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2375 int mb_type= s->mb_type[xy];
2380 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2381 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2384 if(s->data_partitioning){
2385 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2386 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2387 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2393 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2394 ff_update_block_index(s);
2396 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2397 ff_h261_reorder_mb_index(s);
2398 xy= s->mb_y*s->mb_stride + s->mb_x;
2399 mb_type= s->mb_type[xy];
2402 /* write gob / video packet header */
2404 int current_packet_size, is_gob_start;
2406 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2408 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2410 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2412 switch(s->codec_id){
2413 case AV_CODEC_ID_H263:
2414 case AV_CODEC_ID_H263P:
2415 if(!s->h263_slice_structured)
2416 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2418 case AV_CODEC_ID_MPEG2VIDEO:
2419 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2420 case AV_CODEC_ID_MPEG1VIDEO:
2421 if(s->mb_skip_run) is_gob_start=0;
2426 if(s->start_mb_y != mb_y || mb_x!=0){
2429 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2430 ff_mpeg4_init_partitions(s);
2434 assert((put_bits_count(&s->pb)&7) == 0);
2435 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2437 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2438 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2439 int d= 100 / s->avctx->error_rate;
2441 current_packet_size=0;
2442 s->pb.buf_ptr= s->ptr_lastgob;
2443 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2447 if (s->avctx->rtp_callback){
2448 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2449 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2451 update_mb_info(s, 1);
2453 switch(s->codec_id){
2454 case AV_CODEC_ID_MPEG4:
2455 if (CONFIG_MPEG4_ENCODER) {
2456 ff_mpeg4_encode_video_packet_header(s);
2457 ff_mpeg4_clean_buffers(s);
2460 case AV_CODEC_ID_MPEG1VIDEO:
2461 case AV_CODEC_ID_MPEG2VIDEO:
2462 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2463 ff_mpeg1_encode_slice_header(s);
2464 ff_mpeg1_clean_buffers(s);
2467 case AV_CODEC_ID_H263:
2468 case AV_CODEC_ID_H263P:
2469 if (CONFIG_H263_ENCODER)
2470 ff_h263_encode_gob_header(s, mb_y);
2474 if(s->flags&CODEC_FLAG_PASS1){
2475 int bits= put_bits_count(&s->pb);
2476 s->misc_bits+= bits - s->last_bits;
2480 s->ptr_lastgob += current_packet_size;
2481 s->first_slice_line=1;
2482 s->resync_mb_x=mb_x;
2483 s->resync_mb_y=mb_y;
2487 if( (s->resync_mb_x == s->mb_x)
2488 && s->resync_mb_y+1 == s->mb_y){
2489 s->first_slice_line=0;
2493 s->dquant=0; //only for QP_RD
2495 update_mb_info(s, 0);
2497 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2499 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2501 copy_context_before_encode(&backup_s, s, -1);
2503 best_s.data_partitioning= s->data_partitioning;
2504 best_s.partitioned_frame= s->partitioned_frame;
2505 if(s->data_partitioning){
2506 backup_s.pb2= s->pb2;
2507 backup_s.tex_pb= s->tex_pb;
2510 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2511 s->mv_dir = MV_DIR_FORWARD;
2512 s->mv_type = MV_TYPE_16X16;
2514 s->mv[0][0][0] = s->p_mv_table[xy][0];
2515 s->mv[0][0][1] = s->p_mv_table[xy][1];
2516 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2517 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2519 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2520 s->mv_dir = MV_DIR_FORWARD;
2521 s->mv_type = MV_TYPE_FIELD;
2524 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2525 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2526 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2528 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2529 &dmin, &next_block, 0, 0);
2531 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2532 s->mv_dir = MV_DIR_FORWARD;
2533 s->mv_type = MV_TYPE_16X16;
2537 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2538 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2540 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2541 s->mv_dir = MV_DIR_FORWARD;
2542 s->mv_type = MV_TYPE_8X8;
2545 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2546 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2548 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2549 &dmin, &next_block, 0, 0);
2551 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2552 s->mv_dir = MV_DIR_FORWARD;
2553 s->mv_type = MV_TYPE_16X16;
2555 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2556 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2557 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2558 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2560 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2561 s->mv_dir = MV_DIR_BACKWARD;
2562 s->mv_type = MV_TYPE_16X16;
2564 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2565 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2566 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2567 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2569 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2570 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2571 s->mv_type = MV_TYPE_16X16;
2573 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2574 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2575 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2576 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2577 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2578 &dmin, &next_block, 0, 0);
2580 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2581 s->mv_dir = MV_DIR_FORWARD;
2582 s->mv_type = MV_TYPE_FIELD;
2585 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2586 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2587 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2589 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2590 &dmin, &next_block, 0, 0);
2592 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2593 s->mv_dir = MV_DIR_BACKWARD;
2594 s->mv_type = MV_TYPE_FIELD;
2597 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2598 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2599 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2601 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2602 &dmin, &next_block, 0, 0);
2604 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2605 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2606 s->mv_type = MV_TYPE_FIELD;
2608 for(dir=0; dir<2; dir++){
2610 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2611 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2612 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2615 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2616 &dmin, &next_block, 0, 0);
2618 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2620 s->mv_type = MV_TYPE_16X16;
2624 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2625 &dmin, &next_block, 0, 0);
2626 if(s->h263_pred || s->h263_aic){
2628 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2630 ff_clean_intra_table_entries(s); //old mode?
2634 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2635 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2636 const int last_qp= backup_s.qscale;
2639 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2640 static const int dquant_tab[4]={-1,1,-2,2};
2642 assert(backup_s.dquant == 0);
2645 s->mv_dir= best_s.mv_dir;
2646 s->mv_type = MV_TYPE_16X16;
2647 s->mb_intra= best_s.mb_intra;
2648 s->mv[0][0][0] = best_s.mv[0][0][0];
2649 s->mv[0][0][1] = best_s.mv[0][0][1];
2650 s->mv[1][0][0] = best_s.mv[1][0][0];
2651 s->mv[1][0][1] = best_s.mv[1][0][1];
2653 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2654 for(; qpi<4; qpi++){
2655 int dquant= dquant_tab[qpi];
2656 qp= last_qp + dquant;
2657 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2659 backup_s.dquant= dquant;
2660 if(s->mb_intra && s->dc_val[0]){
2662 dc[i]= s->dc_val[0][ s->block_index[i] ];
2663 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2667 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2668 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2669 if(best_s.qscale != qp){
2670 if(s->mb_intra && s->dc_val[0]){
2672 s->dc_val[0][ s->block_index[i] ]= dc[i];
2673 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2680 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2681 int mx= s->b_direct_mv_table[xy][0];
2682 int my= s->b_direct_mv_table[xy][1];
2684 backup_s.dquant = 0;
2685 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2687 ff_mpeg4_set_direct_mv(s, mx, my);
2688 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2689 &dmin, &next_block, mx, my);
2691 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2692 backup_s.dquant = 0;
2693 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2695 ff_mpeg4_set_direct_mv(s, 0, 0);
2696 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2697 &dmin, &next_block, 0, 0);
2699 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2702 coded |= s->block_last_index[i];
2705 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2706 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2707 mx=my=0; //FIXME find the one we actually used
2708 ff_mpeg4_set_direct_mv(s, mx, my);
2709 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2717 s->mv_dir= best_s.mv_dir;
2718 s->mv_type = best_s.mv_type;
2720 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2721 s->mv[0][0][1] = best_s.mv[0][0][1];
2722 s->mv[1][0][0] = best_s.mv[1][0][0];
2723 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2726 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2727 &dmin, &next_block, mx, my);
2732 s->current_picture.qscale_table[xy] = best_s.qscale;
2734 copy_context_after_encode(s, &best_s, -1);
2736 pb_bits_count= put_bits_count(&s->pb);
2737 flush_put_bits(&s->pb);
2738 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2741 if(s->data_partitioning){
2742 pb2_bits_count= put_bits_count(&s->pb2);
2743 flush_put_bits(&s->pb2);
2744 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2745 s->pb2= backup_s.pb2;
2747 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2748 flush_put_bits(&s->tex_pb);
2749 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2750 s->tex_pb= backup_s.tex_pb;
2752 s->last_bits= put_bits_count(&s->pb);
2754 if (CONFIG_H263_ENCODER &&
2755 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2756 ff_h263_update_motion_val(s);
2758 if(next_block==0){ //FIXME 16 vs linesize16
2759 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2760 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2761 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2764 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2765 ff_MPV_decode_mb(s, s->block);
2767 int motion_x = 0, motion_y = 0;
2768 s->mv_type=MV_TYPE_16X16;
2769 // only one MB-Type possible
2772 case CANDIDATE_MB_TYPE_INTRA:
2775 motion_x= s->mv[0][0][0] = 0;
2776 motion_y= s->mv[0][0][1] = 0;
2778 case CANDIDATE_MB_TYPE_INTER:
2779 s->mv_dir = MV_DIR_FORWARD;
2781 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2782 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2784 case CANDIDATE_MB_TYPE_INTER_I:
2785 s->mv_dir = MV_DIR_FORWARD;
2786 s->mv_type = MV_TYPE_FIELD;
2789 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2790 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2791 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2794 case CANDIDATE_MB_TYPE_INTER4V:
2795 s->mv_dir = MV_DIR_FORWARD;
2796 s->mv_type = MV_TYPE_8X8;
2799 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2800 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2803 case CANDIDATE_MB_TYPE_DIRECT:
2804 if (CONFIG_MPEG4_ENCODER) {
2805 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2807 motion_x=s->b_direct_mv_table[xy][0];
2808 motion_y=s->b_direct_mv_table[xy][1];
2809 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2812 case CANDIDATE_MB_TYPE_DIRECT0:
2813 if (CONFIG_MPEG4_ENCODER) {
2814 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2816 ff_mpeg4_set_direct_mv(s, 0, 0);
2819 case CANDIDATE_MB_TYPE_BIDIR:
2820 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2822 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2823 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2824 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2825 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2827 case CANDIDATE_MB_TYPE_BACKWARD:
2828 s->mv_dir = MV_DIR_BACKWARD;
2830 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2831 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2833 case CANDIDATE_MB_TYPE_FORWARD:
2834 s->mv_dir = MV_DIR_FORWARD;
2836 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2837 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2839 case CANDIDATE_MB_TYPE_FORWARD_I:
2840 s->mv_dir = MV_DIR_FORWARD;
2841 s->mv_type = MV_TYPE_FIELD;
2844 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2845 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2846 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2849 case CANDIDATE_MB_TYPE_BACKWARD_I:
2850 s->mv_dir = MV_DIR_BACKWARD;
2851 s->mv_type = MV_TYPE_FIELD;
2854 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2855 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2856 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2859 case CANDIDATE_MB_TYPE_BIDIR_I:
2860 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2861 s->mv_type = MV_TYPE_FIELD;
2863 for(dir=0; dir<2; dir++){
2865 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2866 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2867 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2872 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2875 encode_mb(s, motion_x, motion_y);
2877 // RAL: Update last macroblock type
2878 s->last_mv_dir = s->mv_dir;
2880 if (CONFIG_H263_ENCODER &&
2881 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2882 ff_h263_update_motion_val(s);
2884 ff_MPV_decode_mb(s, s->block);
2887 /* clean the MV table in IPS frames for direct mode in B frames */
2888 if(s->mb_intra /* && I,P,S_TYPE */){
2889 s->p_mv_table[xy][0]=0;
2890 s->p_mv_table[xy][1]=0;
2893 if(s->flags&CODEC_FLAG_PSNR){
2897 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2898 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2900 s->current_picture.f.error[0] += sse(
2901 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2902 s->dest[0], w, h, s->linesize);
2903 s->current_picture.f.error[1] += sse(
2904 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2905 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2906 s->current_picture.f.error[2] += sse(
2907 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2908 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2911 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2912 ff_h263_loop_filter(s);
2914 av_dlog(s->avctx, "MB %d %d bits\n",
2915 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2919 //not beautiful here but we must write it before flushing so it has to be here
2920 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2921 ff_msmpeg4_encode_ext_header(s);
2925 /* Send the last GOB if RTP */
2926 if (s->avctx->rtp_callback) {
2927 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2928 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2929 /* Call the RTP callback to send the last GOB */
2931 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* MERGE(field): accumulate src->field into dst->field, then zero the source
 * so repeated merges cannot double-count.
 * NOTE(review): expands to TWO statements with no do{...}while(0) wrapper,
 * so it is unsafe in an unbraced if/else; all uses below are plain
 * statements, where it is fine. */
2937 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the per-slice motion-estimation statistics gathered by a worker
 * context (src) back into the main context (dst) after the ME pass.
 * NOTE(review): this chunk appears elided (numbering gaps / missing closing
 * brace) — the body below may be incomplete relative to the real file. */
2938 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2939 MERGE(me.scene_change_score);
2940 MERGE(me.mc_mb_var_sum_temp);
2941 MERGE(me.mb_var_sum_temp);
/* Merge per-slice encoding statistics and the slice's bitstream from a
 * worker context (src) into the main context (dst) after encode_thread()
 * has run on each slice.
 * NOTE(review): lines appear elided here (numbering gaps, unbalanced
 * braces) — treat the visible body as a partial view. */
2944 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2947 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2948 MERGE(dct_count[1]);
2957 MERGE(er.error_count);
2958 MERGE(padding_bug_score);
2959 MERGE(current_picture.f.error[0]);
2960 MERGE(current_picture.f.error[1]);
2961 MERGE(current_picture.f.error[2]);
/* DCT denoising error sums are only maintained when noise reduction is on. */
2963 if(dst->avctx->noise_reduction){
2964 for(i=0; i<64; i++){
2965 MERGE(dct_error_sum[0][i]);
2966 MERGE(dct_error_sum[1][i]);
/* Concatenate the slice's bits onto the main bitstream; both writers must
 * be byte-aligned for avpriv_copy_bits() to splice them correctly. */
2970 assert(put_bits_count(&src->pb) % 8 ==0);
2971 assert(put_bits_count(&dst->pb) % 8 ==0);
2972 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2973 flush_put_bits(&dst->pb);
/* Pick the picture-level quality (lambda/qscale) for the current frame.
 * Priority: an explicitly queued next_lambda, else the rate controller
 * (unless fixed qscale). When adaptive quantization is on, clean up the
 * per-MB qscale table for codecs that constrain qscale deltas.
 * dry_run: non-zero when only probing (e.g. 2-pass fcode estimation);
 * in that case next_lambda is not consumed.
 * Returns: negative on rate-control failure, 0 otherwise — presumably;
 * the return paths are elided from this view, TODO confirm.
 * NOTE(review): numbering gaps indicate elided lines in this body. */
2976 static int estimate_qp(MpegEncContext *s, int dry_run){
2977 if (s->next_lambda){
2978 s->current_picture_ptr->f.quality =
2979 s->current_picture.f.quality = s->next_lambda;
2980 if(!dry_run) s->next_lambda= 0;
2981 } else if (!s->fixed_qscale) {
2982 s->current_picture_ptr->f.quality =
2983 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2984 if (s->current_picture.f.quality < 0)
/* Codec-specific post-processing of the adaptive-quant qscale table:
 * MPEG-4 and H.263-family limit how much qscale may change per MB. */
2988 if(s->adaptive_quant){
2989 switch(s->codec_id){
2990 case AV_CODEC_ID_MPEG4:
2991 if (CONFIG_MPEG4_ENCODER)
2992 ff_clean_mpeg4_qscales(s);
2994 case AV_CODEC_ID_H263:
2995 case AV_CODEC_ID_H263P:
2996 case AV_CODEC_ID_FLV1:
2997 if (CONFIG_H263_ENCODER)
2998 ff_clean_h263_qscales(s);
3001 ff_init_qscale_tab(s);
3004 s->lambda= s->lambda_table[0];
/* Non-adaptive path: single lambda for the whole picture. */
3007 s->lambda = s->current_picture.f.quality;
3012 /* must be called before writing the header */
/* Compute temporal distances used by B-frame prediction:
 *   pp_time = distance between the two surrounding non-B frames,
 *   pb_time = distance from the previous non-B frame to this B frame.
 * Time is derived from the frame pts scaled by time_base.num.
 * NOTE(review): some lines are elided here (e.g. the closing brace of
 * the if/else); the visible statements match the upstream logic. */
3013 static void set_frame_distances(MpegEncContext * s){
3014 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3015 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3017 if(s->pict_type==AV_PICTURE_TYPE_B){
3018 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3019 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3021 s->pp_time= s->time - s->last_non_b_time;
3022 s->last_non_b_time= s->time;
3023 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and rounding, run motion estimation
 * across all slice contexts, detect scene changes, choose f_code/b_code,
 * estimate qp, write the codec-specific picture header, then dispatch
 * encode_thread() per slice and merge the results back.
 * Returns: presumably 0 on success / negative on error — the return
 * statements are elided from this view, TODO confirm.
 * NOTE(review): this body is heavily elided (numbering gaps, unbalanced
 * braces); comments below describe only the visible statements. */
3027 static int encode_picture(MpegEncContext *s, int picture_number)
3031 int context_count = s->slice_context_count;
3033 s->picture_number = picture_number;
3035 /* Reset the average MB variance */
3036 s->me.mb_var_sum_temp =
3037 s->me.mc_mb_var_sum_temp = 0;
3039 /* we need to initialize some time vars before we can encode b-frames */
3040 // RAL: Condition added for MPEG1VIDEO
3041 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3042 set_frame_distances(s);
3043 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3044 ff_set_mpeg4_time(s);
3046 s->me.scene_change_score=0;
3048 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: MSMPEG4 v3+ forces no_rounding on I frames; codecs
 * with flip-flop rounding toggle it on each non-B reference frame. */
3050 if(s->pict_type==AV_PICTURE_TYPE_I){
3051 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3052 else s->no_rounding=0;
3053 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3054 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3055 s->no_rounding ^= 1;
/* 2nd pass of 2-pass encoding reuses the logged qscale/fcode; otherwise,
 * unless qscale is fixed, seed lambda from the last frame of this type. */
3058 if(s->flags & CODEC_FLAG_PASS2){
3059 if (estimate_qp(s,1) < 0)
3061 ff_get_2pass_fcode(s);
3062 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3063 if(s->pict_type==AV_PICTURE_TYPE_B)
3064 s->lambda= s->last_lambda_for[s->pict_type];
3066 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3070 s->mb_intra=0; //for the rate distortion & bit compare functions
3071 for(i=1; i<context_count; i++){
3072 ret = ff_update_duplicate_context(s->thread_context[i], s);
3080 /* Estimate motion for every MB */
3081 if(s->pict_type != AV_PICTURE_TYPE_I){
/* Scale lambda by the user's ME penalty compensation (Q8 fixed point). */
3082 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3083 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3084 if (s->pict_type != AV_PICTURE_TYPE_B) {
3085 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3086 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3090 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3091 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3093 for(i=0; i<s->mb_stride*s->mb_height; i++)
3094 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3096 if(!s->fixed_qscale){
3097 /* finding spatial complexity for I-frame rate control */
3098 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3101 for(i=1; i<context_count; i++){
3102 merge_context_after_me(s, s->thread_context[i]);
3104 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3105 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: re-type a P frame as I when the ME pass says the
 * content changed more than the configured threshold. */
3108 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3109 s->pict_type= AV_PICTURE_TYPE_I;
3110 for(i=0; i<s->mb_stride*s->mb_height; i++)
3111 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3112 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3113 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* Choose the MV range code (f_code) for P/S frames and clamp long MVs. */
3117 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3118 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3120 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3122 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3123 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3124 s->f_code= FFMAX3(s->f_code, a, b);
3127 ff_fix_long_p_mvs(s);
3128 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3129 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3133 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3134 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames need both a forward (f_code) and backward (b_code) MV range. */
3139 if(s->pict_type==AV_PICTURE_TYPE_B){
3142 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3143 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3144 s->f_code = FFMAX(a, b);
3146 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3147 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3148 s->b_code = FFMAX(a, b);
3150 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3151 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3152 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3153 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3154 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3156 for(dir=0; dir<2; dir++){
3159 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3160 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3161 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3162 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3170 if (estimate_qp(s, 0) < 0)
3173 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3174 s->qscale= 3; //reduce clipping problems
3176 if (s->out_format == FMT_MJPEG) {
3177 /* for mjpeg, we do include qscale in the matrix */
3179 int j= s->dsp.idct_permutation[i];
3181 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3183 s->y_dc_scale_table=
3184 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3185 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3186 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3187 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3191 //FIXME var duplication
3192 s->current_picture_ptr->f.key_frame =
3193 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3194 s->current_picture_ptr->f.pict_type =
3195 s->current_picture.f.pict_type = s->pict_type;
3197 if (s->current_picture.f.key_frame)
3198 s->picture_in_gop_number=0;
/* Write the codec-specific picture header and account its bit cost. */
3200 s->last_bits= put_bits_count(&s->pb);
3201 switch(s->out_format) {
3203 if (CONFIG_MJPEG_ENCODER)
3204 ff_mjpeg_encode_picture_header(s);
3207 if (CONFIG_H261_ENCODER)
3208 ff_h261_encode_picture_header(s, picture_number);
3211 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3212 ff_wmv2_encode_picture_header(s, picture_number);
3213 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3214 ff_msmpeg4_encode_picture_header(s, picture_number);
3215 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3216 ff_mpeg4_encode_picture_header(s, picture_number);
3217 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3218 ff_rv10_encode_picture_header(s, picture_number);
3219 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3220 ff_rv20_encode_picture_header(s, picture_number);
3221 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3222 ff_flv_encode_picture_header(s, picture_number);
3223 else if (CONFIG_H263_ENCODER)
3224 ff_h263_encode_picture_header(s, picture_number);
3227 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3228 ff_mpeg1_encode_picture_header(s, picture_number);
3233 bits= put_bits_count(&s->pb);
3234 s->header_bits= bits - s->last_bits;
/* Run the slice encoders in parallel, then merge their stats/bitstreams. */
3236 for(i=1; i<context_count; i++){
3237 update_duplicate_context_after_me(s->thread_context[i], s);
3239 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3240 for(i=1; i<context_count; i++){
3241 merge_context_after_encode(s, s->thread_context[i]);
/* Accumulate per-coefficient DCT noise statistics and shrink small
 * coefficients toward zero by the running dct_offset (noise reduction).
 * The two branches handle positive and negative coefficients symmetrically;
 * a coefficient is clamped at zero rather than allowed to flip sign.
 * NOTE(review): the level>0/level<0 tests that select between the branches
 * are elided from this view — the visible lines are the branch bodies. */
3247 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3248 const int intra= s->mb_intra;
3251 s->dct_count[intra]++;
3253 for(i=0; i<64; i++){
3254 int level= block[i];
3258 s->dct_error_sum[intra][i] += level;
3259 level -= s->dct_offset[intra][i];
3260 if(level<0) level=0;
3262 s->dct_error_sum[intra][i] -= level;
3263 level += s->dct_offset[intra][i];
3264 if(level>0) level=0;
/* Trellis (rate-distortion optimal) quantization of one 8x8 DCT block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then a dynamic program over scan positions picks the
 * level/run sequence minimizing distortion + lambda * bits.
 * n: block index (selects luma/chroma handling); qscale: quantizer.
 * *overflow is set when a level exceeds s->max_qcoeff.
 * Returns the index of the last nonzero coefficient, or -1 if the block
 * quantizes to all zeros.
 * NOTE(review): this body is heavily elided (numbering gaps, missing
 * braces/declarations); comments describe the visible statements only. */
3271 static int dct_quantize_trellis_c(MpegEncContext *s,
3272 int16_t *block, int n,
3273 int qscale, int *overflow){
3275 const uint8_t *scantable= s->intra_scantable.scantable;
3276 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3278 unsigned int threshold1, threshold2;
3290 int coeff_count[64];
3291 int qmul, qadd, start_i, last_non_zero, i, dc;
3292 const int esc_length= s->ac_esc_length;
3294 uint8_t * last_length;
3295 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3297 s->dsp.fdct (block);
3299 if(s->dct_error_sum)
3300 s->denoise_dct(s, block);
3302 qadd= ((qscale-1)|1)*8;
3313 /* For AIC we skip quant/dequant of INTRADC */
3318 /* note: block[0] is assumed to be positive */
3319 block[0] = (block[0] + (q >> 1)) / q;
/* Intra path: intra quant matrix and intra AC VLC length tables. */
3322 qmat = s->q_intra_matrix[qscale];
3323 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3324 bias= 1<<(QMAT_SHIFT-1);
3325 length = s->intra_ac_vlc_length;
3326 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter quant matrix and inter AC VLC length tables. */
3330 qmat = s->q_inter_matrix[qscale];
3331 length = s->inter_ac_vlc_length;
3332 last_length= s->inter_ac_vlc_last_length;
3336 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3337 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient that survives the dead zone. */
3339 for(i=63; i>=start_i; i--) {
3340 const int j = scantable[i];
3341 int level = block[j] * qmat[j];
3343 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels: coeff[0][i] = rounded level, coeff[1][i] = one
 * step closer to zero; coeff_count[i] says how many candidates exist. */
3349 for(i=start_i; i<=last_non_zero; i++) {
3350 const int j = scantable[i];
3351 int level = block[j] * qmat[j];
3353 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3354 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3355 if(((unsigned)(level+threshold1))>threshold2){
3357 level= (bias + level)>>QMAT_SHIFT;
3359 coeff[1][i]= level-1;
3360 // coeff[2][k]= level-2;
3362 level= (bias - level)>>QMAT_SHIFT;
3363 coeff[0][i]= -level;
3364 coeff[1][i]= -level+1;
3365 // coeff[2][k]= -level+2;
3367 coeff_count[i]= FFMIN(level, 2);
3368 assert(coeff_count[i]);
3371 coeff[0][i]= (level>>31)|1;
3376 *overflow= s->max_qcoeff < max; //overflow might have happened
3378 if(last_non_zero < start_i){
3379 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3380 return last_non_zero;
/* Dynamic program over scan positions; survivor[] holds positions still
 * worth extending, score_tab[] the best cost to reach each position. */
3383 score_tab[start_i]= 0;
3384 survivor[0]= start_i;
3387 for(i=start_i; i<=last_non_zero; i++){
3388 int level_index, j, zero_distortion;
3389 int dct_coeff= FFABS(block[ scantable[i] ]);
3390 int best_score=256*256*256*120;
/* ifast DCT outputs are AAN-scaled; undo the scale before computing
 * distortion in the true coefficient domain. */
3392 if (s->dsp.fdct == ff_fdct_ifast)
3393 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3394 zero_distortion= dct_coeff*dct_coeff;
3396 for(level_index=0; level_index < coeff_count[i]; level_index++){
3398 int level= coeff[level_index][i];
3399 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see, per codec
 * family, so distortion is measured against actual reconstruction. */
3404 if(s->out_format == FMT_H263){
3405 unquant_coeff= alevel*qmul + qadd;
3407 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3409 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3410 unquant_coeff = (unquant_coeff - 1) | 1;
3412 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3413 unquant_coeff = (unquant_coeff - 1) | 1;
3418 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* |level| < 128: the VLC length table applies; otherwise escape coding. */
3420 if((level&(~127)) == 0){
3421 for(j=survivor_count-1; j>=0; j--){
3422 int run= i - survivor[j];
3423 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3424 score += score_tab[i-run];
3426 if(score < best_score){
3429 level_tab[i+1]= level-64;
3433 if(s->out_format == FMT_H263){
3434 for(j=survivor_count-1; j>=0; j--){
3435 int run= i - survivor[j];
3436 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3437 score += score_tab[i-run];
3438 if(score < last_score){
3441 last_level= level-64;
3447 distortion += esc_length*lambda;
3448 for(j=survivor_count-1; j>=0; j--){
3449 int run= i - survivor[j];
3450 int score= distortion + score_tab[i-run];
3452 if(score < best_score){
3455 level_tab[i+1]= level-64;
3459 if(s->out_format == FMT_H263){
3460 for(j=survivor_count-1; j>=0; j--){
3461 int run= i - survivor[j];
3462 int score= distortion + score_tab[i-run];
3463 if(score < last_score){
3466 last_level= level-64;
3474 score_tab[i+1]= best_score;
/* Prune dominated survivors before adding position i+1. */
3476 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3477 if(last_non_zero <= 27){
3478 for(; survivor_count; survivor_count--){
3479 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3483 for(; survivor_count; survivor_count--){
3484 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3489 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats: pick the best end position explicitly. */
3492 if(s->out_format != FMT_H263){
3493 last_score= 256*256*256*120;
3494 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3495 int score= score_tab[i];
3496 if(i) score += lambda*2; //FIXME exacter?
3498 if(score < last_score){
3501 last_level= level_tab[i];
3502 last_run= run_tab[i];
3507 s->coded_score[n] = last_score;
3509 dc= FFABS(block[0]);
3510 last_non_zero= last_i - 1;
3511 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3513 if(last_non_zero < start_i)
3514 return last_non_zero;
/* Special case: only the first coefficient survived — decide whether
 * coding it at all beats coding nothing, by comparing reconstructions. */
3516 if(last_non_zero == 0 && start_i == 0){
3518 int best_score= dc * dc;
3520 for(i=0; i<coeff_count[0]; i++){
3521 int level= coeff[i][0];
3522 int alevel= FFABS(level);
3523 int unquant_coeff, score, distortion;
3525 if(s->out_format == FMT_H263){
3526 unquant_coeff= (alevel*qmul + qadd)>>3;
3528 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3529 unquant_coeff = (unquant_coeff - 1) | 1;
3531 unquant_coeff = (unquant_coeff + 4) >> 3;
3532 unquant_coeff<<= 3 + 3;
3534 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3536 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3537 else score= distortion + esc_length*lambda;
3539 if(score < best_score){
3541 best_level= level - 64;
3544 block[0]= best_level;
3545 s->coded_score[n] = best_score - dc*dc;
3546 if(best_level == 0) return -1;
3547 else return last_non_zero;
/* Back-track the DP: write chosen levels into block[] in permuted order. */
3553 block[ perm_scantable[last_non_zero] ]= last_level;
3556 for(; i>start_i; i -= run_tab[i] + 1){
3557 block[ perm_scantable[i-1] ]= level_tab[i];
3560 return last_non_zero;
3563 //#define REFINE_STATS 1
/* 64x64 table of 8x8 IDCT basis functions in BASIS_SHIFT fixed point,
 * indexed by permuted coefficient index; filled lazily by build_basis()
 * and used by dct_quantize_refine(). */
3564 static int16_t basis[64][64];
/* Precompute the DCT basis table, applying the IDCT coefficient
 * permutation 'perm'. The sqrt(0.5) factors are the DC normalization of
 * the 2-D DCT-III.
 * NOTE(review): the nested loops over i/j/x/y and 'index' are elided from
 * this view — only the innermost statements are visible. */
3566 static void build_basis(uint8_t *perm){
3573 double s= 0.25*(1<<BASIS_SHIFT);
3575 int perm_index= perm[index];
3576 if(i==0) s*= sqrt(0.5);
3577 if(j==0) s*= sqrt(0.5);
3578 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Rate-distortion refinement of an already-quantized 8x8 block
 * (quantizer noise shaping).  Repeatedly tries +/-1 changes to individual
 * coefficients — including creating or removing coefficients — and keeps a
 * change whenever it lowers the combined cost of the weighted reconstruction
 * error (dsp.try_8x8basis on rem[]) plus the lambda-scaled VLC bit-count
 * delta (length[] / last_length[] tables).  Returns the updated index of the
 * last nonzero coefficient in scan order, like the other quantizers.
 *
 * NOTE(review): this listing is missing many interior lines (loop headers,
 * braces, several declarations); comments below describe only what the
 * visible lines establish and hedge the rest. */
3585 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3586 int16_t *block, int16_t *weight, int16_t *orig,
3589 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3590 const uint8_t *scantable= s->intra_scantable.scantable;
3591 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3592 // unsigned int threshold1, threshold2;
3597 int qmul, qadd, start_i, last_non_zero, i, dc;
3599 uint8_t * last_length;
3601 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters — static, debugging only (printed further below). */
3604 static int after_last=0;
3605 static int to_zero=0;
3606 static int from_zero=0;
3609 static int messed_sign=0;
/* Lazily build the DCT basis table on first call. */
3612 if(basis[0][0] == 0)
3613 build_basis(s->dsp.idct_permutation);
3624 /* For AIC we skip quant/dequant of INTRADC */
3628 q <<= RECON_SHIFT-3;
3629 /* note: block[0] is assumed to be positive */
3631 // block[0] = (block[0] + (q >> 1)) / q;
3633 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3634 // bias= 1<<(QMAT_SHIFT-1);
/* Select the intra vs. inter AC VLC length tables used for bit-cost deltas
 * (the branch structure around these assignments is not visible here). */
3635 length = s->intra_ac_vlc_length;
3636 last_length= s->intra_ac_vlc_last_length;
3640 length = s->inter_ac_vlc_length;
3641 last_length= s->inter_ac_vlc_last_length;
3643 last_non_zero = s->block_last_index[n];
/* Initialize rem[] = (reconstruction - original) in RECON_SHIFT fixed point;
 * the refinement loop keeps it current via add_8x8basis(). */
3648 dc += (1<<(RECON_SHIFT-1));
3649 for(i=0; i<64; i++){
3650 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3653 STOP_TIMER("memset rem[]")}
/* Compress the perceptual weights into the 16..63 range (qns = noise
 * shaping strength); `one` is presumably a normalization constant declared
 * in a line not visible here — confirm against full source. */
3656 for(i=0; i<64; i++){
3661 w= FFABS(weight[i]) + qns*one;
3662 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3665 // w=weight[i] = (63*qns + (w/2)) / w;
3671 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Build the run-length table for the current coefficients and fold their
 * dequantized contribution into rem[]. */
3677 for(i=start_i; i<=last_non_zero; i++){
3678 int j= perm_scantable[i];
3679 const int level= block[j];
3683 if(level<0) coeff= qmul*level - qadd;
3684 else coeff= qmul*level + qadd;
3685 run_tab[rle_index++]=run;
3688 s->dsp.add_8x8basis(rem, basis[j], coeff);
3694 if(last_non_zero>0){
3695 STOP_TIMER("init rem[]")
/* --- main iterative search: one pass per accepted change --- */
3702 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3705 int run2, best_unquant_change=0, analyze_gradient;
/* Gradient analysis (d1[]) is used to reject new coefficients whose sign
 * disagrees with the residual's gradient; only worth computing for longer
 * blocks or aggressive noise shaping. */
3709 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3711 if(analyze_gradient){
3715 for(i=0; i<64; i++){
3718 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3721 STOP_TIMER("rem*w*w")}
/* Try +/-1 on the intra DC coefficient (block[0]); DC is quantized with
 * plain `q`, not qmul/qadd. */
3731 const int level= block[0];
3732 int change, old_coeff;
3734 assert(s->mb_intra);
3738 for(change=-1; change<=1; change+=2){
3739 int new_level= level + change;
3740 int score, new_coeff;
3742 new_coeff= q*new_level;
/* DC must stay in [0, 2048) — reject out-of-range candidates. */
3743 if(new_coeff >= 2048 || new_coeff < 0)
3746 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3747 if(score<best_score){
3750 best_change= change;
3751 best_unquant_change= new_coeff - old_coeff;
3758 run2= run_tab[rle_index++];
/* Try +/-1 on every AC position in scan order (including positions just
 * past last_non_zero, i.e. creating a new trailing coefficient). */
3762 for(i=start_i; i<64; i++){
3763 int j= perm_scantable[i];
3764 const int level= block[j];
3765 int change, old_coeff;
3767 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3771 if(level<0) old_coeff= qmul*level - qadd;
3772 else old_coeff= qmul*level + qadd;
3773 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3777 assert(run2>=0 || i >= last_non_zero );
3780 for(change=-1; change<=1; change+=2){
3781 int new_level= level + change;
3782 int score, new_coeff, unquant_change;
/* Mild noise shaping only allows moves toward zero. */
3785 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3789 if(new_level<0) new_coeff= qmul*new_level - qadd;
3790 else new_coeff= qmul*new_level + qadd;
3791 if(new_coeff >= 2048 || new_coeff <= -2048)
3793 //FIXME check for overflow
/* Case 1: nonzero -> nonzero — bit cost is a simple VLC length delta
 * (levels are stored biased by +64 in the UNI_AC tables). */
3796 if(level < 63 && level > -63){
3797 if(i < last_non_zero)
3798 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3799 - length[UNI_AC_ENC_INDEX(run, level+64)];
3801 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3802 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Case 2: zero -> +/-1 — a new coefficient appears, splitting the run
 * of zeros; account for the changed run lengths of the neighbors. */
3805 assert(FFABS(new_level)==1);
3807 if(analyze_gradient){
3808 int g= d1[ scantable[i] ];
/* Reject a new coefficient whose sign matches the residual gradient
 * (it would increase the error in that direction). */
3809 if(g && (g^new_level) >= 0)
3813 if(i < last_non_zero){
3814 int next_i= i + run2 + 1;
3815 int next_level= block[ perm_scantable[next_i] ] + 64;
/* next_level+64 must fit the 0..127 table range. */
3817 if(next_level&(~127))
3820 if(next_i < last_non_zero)
3821 score += length[UNI_AC_ENC_INDEX(run, 65)]
3822 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3823 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3825 score += length[UNI_AC_ENC_INDEX(run, 65)]
3826 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3827 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3829 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3831 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3832 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Case 3: +/-1 -> zero — a coefficient disappears, merging the runs of
 * zeros on both sides (mirror of case 2). */
3838 assert(FFABS(level)==1);
3840 if(i < last_non_zero){
3841 int next_i= i + run2 + 1;
3842 int next_level= block[ perm_scantable[next_i] ] + 64;
3844 if(next_level&(~127))
3847 if(next_i < last_non_zero)
3848 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3849 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3850 - length[UNI_AC_ENC_INDEX(run, 65)];
3852 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3853 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3854 - length[UNI_AC_ENC_INDEX(run, 65)];
3856 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3858 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3859 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Total score = lambda-scaled bit delta + distortion delta. */
3866 unquant_change= new_coeff - old_coeff;
3867 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3869 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3870 if(score<best_score){
3873 best_change= change;
3874 best_unquant_change= unquant_change;
3878 prev_level= level + 64;
3879 if(prev_level&(~127))
3888 STOP_TIMER("iterative step")}
/* Apply the best change found in this pass (the surrounding "was a change
 * accepted?" condition is not visible in this listing). */
3892 int j= perm_scantable[ best_coeff ];
3894 block[j] += best_change;
3896 if(best_coeff > last_non_zero){
3897 last_non_zero= best_coeff;
3905 if(block[j] - best_change){
3906 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* Trim trailing zeros after a coefficient was zeroed. */
3918 for(; last_non_zero>=start_i; last_non_zero--){
3919 if(block[perm_scantable[last_non_zero]])
/* REFINE_STATS periodic debug dump. */
3925 if(256*256*256*64 % count == 0){
3926 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab for the modified block and update rem[] with the
 * accepted coefficient change, then iterate again. */
3931 for(i=start_i; i<=last_non_zero; i++){
3932 int j= perm_scantable[i];
3933 const int level= block[j];
3936 run_tab[rle_index++]=run;
3943 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3949 if(last_non_zero>0){
3950 STOP_TIMER("iterative search")
3955 return last_non_zero;
/* Default C quantizer: forward DCT of `block`, then quantize with the
 * per-qscale matrix (intra or inter) and rounding bias.
 * Returns the scan-order index of the last nonzero coefficient and sets
 * *overflow when a quantized level exceeded s->max_qcoeff, so the caller
 * can clip/re-encode.
 * NOTE(review): interior lines are missing from this listing; comments
 * describe only the visible code. */
3958 int ff_dct_quantize_c(MpegEncContext *s,
3959 int16_t *block, int n,
3960 int qscale, int *overflow)
3962 int i, j, level, last_non_zero, q, start_i;
3964 const uint8_t *scantable= s->intra_scantable.scantable;
3967 unsigned int threshold1, threshold2;
3969 s->dsp.fdct (block);
/* Optional DCT-domain denoising (tracks error sums across frames). */
3971 if(s->dct_error_sum)
3972 s->denoise_dct(s, block);
3982 /* For AIC we skip quant/dequant of INTRADC */
3985 /* note: block[0] is assumed to be positive */
/* Intra DC is quantized by plain division with rounding; AC uses qmat. */
3986 block[0] = (block[0] + (q >> 1)) / q;
3989 qmat = s->q_intra_matrix[qscale];
3990 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3994 qmat = s->q_inter_matrix[qscale];
3995 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* Dead-zone test: |level| > threshold1 expressed as one unsigned compare
 * ((unsigned)(level+threshold1) > threshold2 covers both signs). */
3997 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3998 threshold2= (threshold1<<1);
/* First pass, high to low frequency: find last_non_zero. */
3999 for(i=63;i>=start_i;i--) {
4001 level = block[j] * qmat[j];
4003 if(((unsigned)(level+threshold1))>threshold2){
/* Second pass, low to high: quantize the surviving coefficients. */
4010 for(i=start_i; i<=last_non_zero; i++) {
4012 level = block[j] * qmat[j];
4014 // if( bias+level >= (1<<QMAT_SHIFT)
4015 // || bias-level >= (1<<QMAT_SHIFT)){
4016 if(((unsigned)(level+threshold1))>threshold2){
4018 level= (bias + level)>>QMAT_SHIFT;
4021 level= (bias - level)>>QMAT_SHIFT;
4029 *overflow= s->max_qcoeff < max; //overflow might have happened
4031 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4032 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4033 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4035 return last_non_zero;
/* Helpers for the AVOption tables below: field offset inside
 * MpegEncContext, and the common video+encoding option flags. */
4038 #define OFFSET(x) offsetof(MpegEncContext, x)
4039 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder (terminating entry lies outside
 * this listing). */
4040 static const AVOption h263_options[] = {
4041 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4042 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4043 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOption API. */
4048 static const AVClass h263_class = {
4049 .class_name = "H.263 encoder",
4050 .item_name = av_default_item_name,
4051 .option = h263_options,
4052 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: generic MPV encode entry points with the
 * h263_class private options (".name" field lies in a line not visible in
 * this listing). */
4055 AVCodec ff_h263_encoder = {
4057 .type = AVMEDIA_TYPE_VIDEO,
4058 .id = AV_CODEC_ID_H263,
4059 .priv_data_size = sizeof(MpegEncContext),
4060 .init = ff_MPV_encode_init,
4061 .encode2 = ff_MPV_encode_picture,
4062 .close = ff_MPV_encode_end,
4063 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4064 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4065 .priv_class = &h263_class,
/* Private options of the H.263+ encoder (terminating entry lies outside
 * this listing). */
4068 static const AVOption h263p_options[] = {
4069 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4070 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4071 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4072 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOption API. */
4076 static const AVClass h263p_class = {
4077 .class_name = "H.263p encoder",
4078 .item_name = av_default_item_name,
4079 .option = h263p_options,
4080 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; slice-threaded capable. */
4083 AVCodec ff_h263p_encoder = {
4085 .type = AVMEDIA_TYPE_VIDEO,
4086 .id = AV_CODEC_ID_H263P,
4087 .priv_data_size = sizeof(MpegEncContext),
4088 .init = ff_MPV_encode_init,
4089 .encode2 = ff_MPV_encode_picture,
4090 .close = ff_MPV_encode_end,
4091 .capabilities = CODEC_CAP_SLICE_THREADS,
4092 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4093 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4094 .priv_class = &h263p_class,
/* Declares msmpeg4v2_class with the generic MPV encoder options. */
4097 FF_MPV_GENERIC_CLASS(msmpeg4v2)
/* MS-MPEG4 v2 encoder registration via the generic MPV entry points. */
4099 AVCodec ff_msmpeg4v2_encoder = {
4100 .name = "msmpeg4v2",
4101 .type = AVMEDIA_TYPE_VIDEO,
4102 .id = AV_CODEC_ID_MSMPEG4V2,
4103 .priv_data_size = sizeof(MpegEncContext),
4104 .init = ff_MPV_encode_init,
4105 .encode2 = ff_MPV_encode_picture,
4106 .close = ff_MPV_encode_end,
4107 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4108 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4109 .priv_class = &msmpeg4v2_class,
/* Declares msmpeg4v3_class with the generic MPV encoder options. */
4112 FF_MPV_GENERIC_CLASS(msmpeg4v3)
/* MS-MPEG4 v3 encoder registration via the generic MPV entry points. */
4114 AVCodec ff_msmpeg4v3_encoder = {
4116 .type = AVMEDIA_TYPE_VIDEO,
4117 .id = AV_CODEC_ID_MSMPEG4V3,
4118 .priv_data_size = sizeof(MpegEncContext),
4119 .init = ff_MPV_encode_init,
4120 .encode2 = ff_MPV_encode_picture,
4121 .close = ff_MPV_encode_end,
4122 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4123 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4124 .priv_class = &msmpeg4v3_class,
4127 FF_MPV_GENERIC_CLASS(wmv1)
4129 AVCodec ff_wmv1_encoder = {
4131 .type = AVMEDIA_TYPE_VIDEO,
4132 .id = AV_CODEC_ID_WMV1,
4133 .priv_data_size = sizeof(MpegEncContext),
4134 .init = ff_MPV_encode_init,
4135 .encode2 = ff_MPV_encode_picture,
4136 .close = ff_MPV_encode_end,
4137 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4138 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4139 .priv_class = &wmv1_class,