2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
41 #include "mpegvideo.h"
49 #include "aandcttab.h"
51 #include "mpeg4video.h"
53 #include "bytestream.h"
/* Forward declarations for encoder internals defined later in this file. */
56 static int encode_picture(MpegEncContext *s, int picture_number);
57 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
58 static int sse_mb(MpegEncContext *s);
59 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
60 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default motion-estimation penalty / fcode tables shared by all encoder
 * instances; filled in by MPV_encode_defaults(). */
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
65 const AVOption ff_mpv_generic_options[] = {
/**
 * Convert a quantization matrix into the per-qscale integer reciprocal
 * tables used by the quantizers.
 *
 * @param dsp          DSP context; dsp->fdct selects which scaling the
 *                     tables must compensate for
 * @param qmat         output: one 64-entry int table per qscale
 * @param qmat16       output: 16-bit tables (value + rounded bias) for the
 *                     MMX/fixed-point quantizer path
 * @param quant_matrix input quantization matrix (natural order)
 * @param bias         quantizer rounding bias (QUANT_BIAS_SHIFT fixed point)
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        nonzero for an intra matrix (entry 0 = DC is skipped
 *                     in the overflow scan below)
 */
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71 uint16_t (*qmat16)[2][64],
72 const uint16_t *quant_matrix,
73 int bias, int qmin, int qmax, int intra)
78 for (qscale = qmin; qscale <= qmax; qscale++) {
/* slow/accurate DCTs: no extra scaling baked into the transform */
80 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81 dsp->fdct == ff_jpeg_fdct_islow_10 ||
82 dsp->fdct == ff_faandct) {
83 for (i = 0; i < 64; i++) {
84 const int j = dsp->idct_permutation[i];
85 /* 16 <= qscale * quant_matrix[i] <= 7905
86 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
87 * 19952 <= x <= 249205026
88 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
89 * 3444240 >= (1 << 36) / (x) >= 275 */
91 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92 (qscale * quant_matrix[j]));
/* AAN fast DCT: its output is pre-scaled by ff_aanscales, so the
 * reciprocal must fold that factor back out */
94 } else if (dsp->fdct == ff_fdct_ifast) {
95 for (i = 0; i < 64; i++) {
96 const int j = dsp->idct_permutation[i];
97 /* 16 <= qscale * quant_matrix[i] <= 7905
98 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99 * 19952 <= x <= 249205026
100 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101 * 3444240 >= (1 << 36) / (x) >= 275 */
103 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104 (ff_aanscales[i] * qscale *
108 for (i = 0; i < 64; i++) {
109 const int j = dsp->idct_permutation[i];
110 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
111 * Assume x = qscale * quant_matrix[i]
113 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
114 * so 32768 >= (1 << 19) / (x) >= 67 */
115 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
116 (qscale * quant_matrix[j]));
117 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
118 // (qscale * quant_matrix[i]);
119 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
120 (qscale * quant_matrix[j]);
/* clamp: a value of 0 or exactly 128*256 would break the 16-bit
 * quantizer, so pin it just below the limit */
122 if (qmat16[qscale][0][i] == 0 ||
123 qmat16[qscale][0][i] == 128 * 256)
124 qmat16[qscale][0][i] = 128 * 256 - 1;
125 qmat16[qscale][1][i] =
126 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
127 qmat16[qscale][0][i]);
/* scan for potential 32-bit overflow in level * qmat products;
 * for intra matrices the DC coefficient (i == 0) is exempt */
131 for (i = intra; i < 64; i++) {
133 if (dsp->fdct == ff_fdct_ifast) {
134 max = (8191LL * ff_aanscales[i]) >> 14;
136 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
142 av_log(NULL, AV_LOG_INFO,
143 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the current qscale (and lambda2) from the rate-control lambda,
 * clipped to the user-supplied qmin/qmax range.  The 139/(1<<7) factor is
 * the fixed-point lambda -> qscale conversion used throughout mpegvideo. */
148 static inline void update_qscale(MpegEncContext *s)
150 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
151 (FF_LAMBDA_SHIFT + 7);
152 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
/* lambda2 = lambda^2 rescaled, with rounding */
154 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order as required by the MPEG syntax. */
158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
164 for (i = 0; i < 64; i++) {
165 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
172 * init s->current_picture.qscale_table from s->lambda_table
174 void ff_init_qscale_tab(MpegEncContext *s)
176 int8_t * const qscale_table = s->current_picture.qscale_table;
/* convert each macroblock's lambda to a qscale (same fixed-point
 * conversion as update_qscale()) and clip to the allowed range */
179 for (i = 0; i < s->mb_num; i++) {
180 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
181 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
182 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation may have changed from a slice
 * thread's duplicate context back into the destination context. */
187 static void update_duplicate_context_after_me(MpegEncContext *dst,
190 #define COPY(a) dst->a= src->a
192 COPY(current_picture);
198 COPY(picture_in_gop_number);
199 COPY(gop_picture_number);
200 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
201 COPY(progressive_frame); // FIXME don't set in encode_header
202 COPY(partitioned_frame); // FIXME don't set in encode_header
207 * Set the given MpegEncContext to defaults for encoding.
208 * the changed fields will not depend upon the prior state of the MpegEncContext.
210 static void MPV_encode_defaults(MpegEncContext *s)
213 ff_MPV_common_defaults(s);
/* fill the shared static fcode table; note it is written by every
 * encoder instance but always with the same values */
215 for (i = -16; i < 16; i++) {
216 default_fcode_tab[i + MAX_MV] = 1;
218 s->me.mv_penalty = default_mv_penalty;
219 s->fcode_tab = default_fcode_tab;
221 s->input_picture_number = 0;
222 s->picture_in_gop_number = 0;
225 /* init video encoder */
226 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
228 MpegEncContext *s = avctx->priv_data;
230 int chroma_h_shift, chroma_v_shift;
232 MPV_encode_defaults(s);
/* ---- per-codec pixel-format validation ---- */
234 switch (avctx->codec_id) {
235 case AV_CODEC_ID_MPEG2VIDEO:
236 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
237 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
238 av_log(avctx, AV_LOG_ERROR,
239 "only YUV420 and YUV422 are supported\n");
243 case AV_CODEC_ID_LJPEG:
244 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
245 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
246 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
247 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
/* non-J (limited-range) variants are allowed only when the user
 * relaxes standard compliance */
248 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
249 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
250 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
251 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
252 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
256 case AV_CODEC_ID_MJPEG:
257 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
258 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
259 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
260 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
261 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
262 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default case: all remaining codecs only accept 4:2:0 */
267 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
268 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
273 switch (avctx->pix_fmt) {
274 case AV_PIX_FMT_YUVJ422P:
275 case AV_PIX_FMT_YUV422P:
276 s->chroma_format = CHROMA_422;
278 case AV_PIX_FMT_YUVJ420P:
279 case AV_PIX_FMT_YUV420P:
281 s->chroma_format = CHROMA_420;
/* ---- copy user settings into the MpegEncContext ---- */
285 s->bit_rate = avctx->bit_rate;
286 s->width = avctx->width;
287 s->height = avctx->height;
288 if (avctx->gop_size > 600 &&
289 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
290 av_log(avctx, AV_LOG_ERROR,
291 "Warning keyframe interval too large! reducing it ...\n");
292 avctx->gop_size = 600;
294 s->gop_size = avctx->gop_size;
296 s->flags = avctx->flags;
297 s->flags2 = avctx->flags2;
298 if (avctx->max_b_frames > MAX_B_FRAMES) {
299 av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
300 "is %d.\n", MAX_B_FRAMES);
302 s->max_b_frames = avctx->max_b_frames;
303 s->codec_id = avctx->codec->id;
304 s->strict_std_compliance = avctx->strict_std_compliance;
305 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
306 s->mpeg_quant = avctx->mpeg_quant;
307 s->rtp_mode = !!avctx->rtp_payload_size;
308 s->intra_dc_precision = avctx->intra_dc_precision;
309 s->user_specified_pts = AV_NOPTS_VALUE;
311 if (s->gop_size <= 1) {
318 s->me_method = avctx->me_method;
321 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* adaptive quant is needed whenever any masking option or QP-RD is on */
323 s->adaptive_quant = (s->avctx->lumi_masking ||
324 s->avctx->dark_masking ||
325 s->avctx->temporal_cplx_masking ||
326 s->avctx->spatial_cplx_masking ||
327 s->avctx->p_masking ||
328 s->avctx->border_masking ||
329 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
332 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* ---- rate-control sanity checks ---- */
334 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
335 av_log(avctx, AV_LOG_ERROR,
336 "a vbv buffer size is needed, "
337 "for encoding with a maximum bitrate\n");
341 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
342 av_log(avctx, AV_LOG_INFO,
343 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
346 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
347 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
351 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
352 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
356 if (avctx->rc_max_rate &&
357 avctx->rc_max_rate == avctx->bit_rate &&
358 avctx->rc_max_rate != avctx->rc_min_rate) {
359 av_log(avctx, AV_LOG_INFO,
360 "impossible bitrate constraints, this will fail\n");
363 if (avctx->rc_buffer_size &&
364 avctx->bit_rate * (int64_t)avctx->time_base.num >
365 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
366 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
370 if (!s->fixed_qscale &&
371 avctx->bit_rate * av_q2d(avctx->time_base) >
372 avctx->bit_rate_tolerance) {
373 av_log(avctx, AV_LOG_ERROR,
374 "bitrate tolerance too small for bitrate\n");
/* CBR MPEG-1/2 cannot express vbv_delay for very large buffers;
 * 0xFFFF means "variable" in the bitstream */
378 if (s->avctx->rc_max_rate &&
379 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
380 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
381 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
382 90000LL * (avctx->rc_buffer_size - 1) >
383 s->avctx->rc_max_rate * 0xFFFFLL) {
384 av_log(avctx, AV_LOG_INFO,
385 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
386 "specified vbv buffer is too large for the given bitrate!\n");
/* ---- codec capability checks for the requested features ---- */
389 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
390 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
391 s->codec_id != AV_CODEC_ID_FLV1) {
392 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
396 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
397 av_log(avctx, AV_LOG_ERROR,
398 "OBMC is only supported with simple mb decision\n");
402 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
403 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
407 if (s->max_b_frames &&
408 s->codec_id != AV_CODEC_ID_MPEG4 &&
409 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
410 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
411 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
415 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
416 s->codec_id == AV_CODEC_ID_H263 ||
417 s->codec_id == AV_CODEC_ID_H263P) &&
418 (avctx->sample_aspect_ratio.num > 255 ||
419 avctx->sample_aspect_ratio.den > 255)) {
420 av_log(avctx, AV_LOG_ERROR,
421 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
422 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
426 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
427 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
428 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
432 // FIXME mpeg2 uses that too
433 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
434 av_log(avctx, AV_LOG_ERROR,
435 "mpeg2 style quantization not supported by codec\n");
439 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
440 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
444 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
445 s->avctx->mb_decision != FF_MB_DECISION_RD) {
446 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
450 if (s->avctx->scenechange_threshold < 1000000000 &&
451 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
452 av_log(avctx, AV_LOG_ERROR,
453 "closed gop with scene change detection are not supported yet, "
454 "set threshold to 1000000000\n");
458 if (s->flags & CODEC_FLAG_LOW_DELAY) {
459 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
460 av_log(avctx, AV_LOG_ERROR,
461 "low delay forcing is only available for mpeg2\n");
464 if (s->max_b_frames != 0) {
465 av_log(avctx, AV_LOG_ERROR,
466 "b frames cannot be used with low delay\n");
471 if (s->q_scale_type == 1) {
472 if (avctx->qmax > 12) {
473 av_log(avctx, AV_LOG_ERROR,
474 "non linear quant only supports qmax <= 12 currently\n");
479 if (s->avctx->thread_count > 1 &&
480 s->codec_id != AV_CODEC_ID_MPEG4 &&
481 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
482 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
483 (s->codec_id != AV_CODEC_ID_H263P)) {
484 av_log(avctx, AV_LOG_ERROR,
485 "multi threaded encoding not supported by codec\n");
489 if (s->avctx->thread_count < 1) {
490 av_log(avctx, AV_LOG_ERROR,
491 "automatic thread number detection not supported by codec,"
496 if (s->avctx->thread_count > 1)
499 if (!avctx->time_base.den || !avctx->time_base.num) {
500 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
/* mb_threshold must leave headroom for the <<8 used later */
504 i = (INT_MAX / 2 + 128) >> 8;
505 if (avctx->mb_threshold >= i) {
506 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
511 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
512 av_log(avctx, AV_LOG_INFO,
513 "notice: b_frame_strategy only affects the first pass\n");
514 avctx->b_frame_strategy = 0;
/* reduce the time base to lowest terms */
517 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
519 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
520 avctx->time_base.den /= i;
521 avctx->time_base.num /= i;
/* ---- default quantizer rounding biases ---- */
525 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
526 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
527 // (a + x * 3 / 8) / x
528 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
529 s->inter_quant_bias = 0;
531 s->intra_quant_bias = 0;
533 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
/* user-supplied biases override the defaults above */
536 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
537 s->intra_quant_bias = avctx->intra_quant_bias;
538 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
539 s->inter_quant_bias = avctx->inter_quant_bias;
541 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the timebase denominator in a 16-bit field */
544 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
545 s->avctx->time_base.den > (1 << 16) - 1) {
546 av_log(avctx, AV_LOG_ERROR,
547 "timebase %d/%d not supported by MPEG 4 standard, "
548 "the maximum admitted value for the timebase denominator "
549 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
553 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* ---- per-codec output format / feature setup ---- */
555 switch (avctx->codec->id) {
556 case AV_CODEC_ID_MPEG1VIDEO:
557 s->out_format = FMT_MPEG1;
558 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
559 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
561 case AV_CODEC_ID_MPEG2VIDEO:
562 s->out_format = FMT_MPEG1;
563 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
564 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
567 case AV_CODEC_ID_LJPEG:
568 case AV_CODEC_ID_MJPEG:
569 s->out_format = FMT_MJPEG;
570 s->intra_only = 1; /* force intra only for jpeg */
571 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
572 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
573 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
574 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
575 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
/* derive JPEG sampling factors from the chroma subsampling shifts */
577 s->mjpeg_vsample[0] = 2;
578 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
579 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
580 s->mjpeg_hsample[0] = 2;
581 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
582 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
584 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
585 ff_mjpeg_encode_init(s) < 0)
590 case AV_CODEC_ID_H261:
591 if (!CONFIG_H261_ENCODER)
593 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
594 av_log(avctx, AV_LOG_ERROR,
595 "The specified picture size of %dx%d is not valid for the "
596 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
597 s->width, s->height);
600 s->out_format = FMT_H261;
604 case AV_CODEC_ID_H263:
605 if (!CONFIG_H263_ENCODER)
/* index 8 in ff_h263_format means "no matching standard size" */
607 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
608 s->width, s->height) == 8) {
609 av_log(avctx, AV_LOG_INFO,
610 "The specified picture size of %dx%d is not valid for "
611 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
612 "352x288, 704x576, and 1408x1152."
613 "Try H.263+.\n", s->width, s->height);
616 s->out_format = FMT_H263;
620 case AV_CODEC_ID_H263P:
621 s->out_format = FMT_H263;
624 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
625 s->modified_quant = s->h263_aic;
626 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
627 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
630 /* These are just to be sure */
634 case AV_CODEC_ID_FLV1:
635 s->out_format = FMT_H263;
636 s->h263_flv = 2; /* format = 1; 11-bit codes */
637 s->unrestricted_mv = 1;
638 s->rtp_mode = 0; /* don't allow GOB */
642 case AV_CODEC_ID_RV10:
643 s->out_format = FMT_H263;
647 case AV_CODEC_ID_RV20:
648 s->out_format = FMT_H263;
651 s->modified_quant = 1;
655 s->unrestricted_mv = 0;
657 case AV_CODEC_ID_MPEG4:
658 s->out_format = FMT_H263;
660 s->unrestricted_mv = 1;
661 s->low_delay = s->max_b_frames ? 0 : 1;
662 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
664 case AV_CODEC_ID_MSMPEG4V2:
665 s->out_format = FMT_H263;
667 s->unrestricted_mv = 1;
668 s->msmpeg4_version = 2;
672 case AV_CODEC_ID_MSMPEG4V3:
673 s->out_format = FMT_H263;
675 s->unrestricted_mv = 1;
676 s->msmpeg4_version = 3;
677 s->flipflop_rounding = 1;
681 case AV_CODEC_ID_WMV1:
682 s->out_format = FMT_H263;
684 s->unrestricted_mv = 1;
685 s->msmpeg4_version = 4;
686 s->flipflop_rounding = 1;
690 case AV_CODEC_ID_WMV2:
691 s->out_format = FMT_H263;
693 s->unrestricted_mv = 1;
694 s->msmpeg4_version = 5;
695 s->flipflop_rounding = 1;
703 avctx->has_b_frames = !s->low_delay;
707 s->progressive_frame =
708 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
709 CODEC_FLAG_INTERLACED_ME) ||
/* ---- allocate shared mpegvideo state and encoder buffers ---- */
713 if (ff_MPV_common_init(s) < 0)
717 ff_MPV_encode_init_x86(s);
719 s->avctx->coded_frame = &s->current_picture.f;
721 if (s->msmpeg4_version) {
722 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
723 2 * 2 * (MAX_LEVEL + 1) *
724 (MAX_RUN + 1) * 2 * sizeof(int), fail);
726 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
728 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
729 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
730 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
731 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
732 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
733 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
735 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
737 if (s->avctx->noise_reduction) {
738 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
739 2 * 64 * sizeof(uint16_t), fail);
/* ---- function-pointer setup (quantizers, comparisons, sub-encoders) ---- */
742 ff_h263dsp_init(&s->h263dsp);
743 if (!s->dct_quantize)
744 s->dct_quantize = ff_dct_quantize_c;
746 s->denoise_dct = denoise_dct_c;
747 s->fast_dct_quantize = s->dct_quantize;
749 s->dct_quantize = dct_quantize_trellis_c;
751 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
752 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
754 s->quant_precision = 5;
756 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
757 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
759 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
760 ff_h261_encode_init(s);
761 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
762 ff_h263_encode_init(s);
763 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
764 ff_msmpeg4_encode_init(s);
765 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
766 && s->out_format == FMT_MPEG1)
767 ff_mpeg1_encode_init(s);
/* ---- select / copy quantization matrices (idct-permuted) ---- */
770 for (i = 0; i < 64; i++) {
771 int j = s->dsp.idct_permutation[i];
772 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
774 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
775 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
776 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
778 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
781 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
782 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
784 if (s->avctx->intra_matrix)
785 s->intra_matrix[j] = s->avctx->intra_matrix[i];
786 if (s->avctx->inter_matrix)
787 s->inter_matrix[j] = s->avctx->inter_matrix[i];
790 /* precompute matrix */
791 /* for mjpeg, we do include qscale in the matrix */
792 if (s->out_format != FMT_MJPEG) {
793 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
794 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
796 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
797 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
801 if (ff_rate_control_init(s) < 0)
804 #if FF_API_ERROR_RATE
805 FF_DISABLE_DEPRECATION_WARNINGS
806 if (avctx->error_rate)
807 s->error_rate = avctx->error_rate;
808 FF_ENABLE_DEPRECATION_WARNINGS;
/* b_frame_strategy 2 needs downscaled scratch frames for the
 * estimate_best_b_count() trial encodes */
811 if (avctx->b_frame_strategy == 2) {
812 for (i = 0; i < s->max_b_frames + 2; i++) {
813 s->tmp_frames[i] = av_frame_alloc();
814 if (!s->tmp_frames[i])
815 return AVERROR(ENOMEM);
817 s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
818 s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
819 s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
821 ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* fail: common cleanup path for all allocation/init errors above */
829 ff_MPV_encode_end(avctx);
830 return AVERROR_UNKNOWN;
/* Free all encoder state: rate control, shared mpegvideo buffers, the
 * MJPEG sub-encoder (if used), extradata and the b_frame_strategy
 * scratch frames. */
833 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
835 MpegEncContext *s = avctx->priv_data;
838 ff_rate_control_uninit(s);
840 ff_MPV_common_end(s);
841 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
842 s->out_format == FMT_MJPEG)
843 ff_mjpeg_encode_close(s);
845 av_freep(&avctx->extradata);
847 for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
848 av_frame_free(&s->tmp_frames[i]);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (used as a flatness measure in get_intra_count()). */
853 static int get_sae(uint8_t *src, int ref, int stride)
858 for (y = 0; y < 16; y++) {
859 for (x = 0; x < 16; x++) {
860 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks where coding intra (SAE against the block mean)
 * looks cheaper than inter (SAD against the reference frame).
 * Used by b_frame_strategy 1 to score candidate B-frames. */
867 static int get_intra_count(MpegEncContext *s, uint8_t *src,
868 uint8_t *ref, int stride)
876 for (y = 0; y < h; y += 16) {
877 for (x = 0; x < w; x += 16) {
878 int offset = x + y * stride;
879 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
881 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
882 int sae = get_sae(src + offset, mean, stride);
/* +500: bias towards inter so near-ties don't count as intra */
884 acc += sae + 500 < sad;
/* Take ownership of one user-supplied frame: validate/assign its PTS,
 * either reference it directly or copy it into an internal Picture
 * (with INPLACE_OFFSET padding), and append it to s->input_picture[]
 * at the encoding-delay position. */
891 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
895 int i, display_picture_number = 0, ret;
896 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
897 (s->low_delay ? 0 : 1);
902 display_picture_number = s->input_picture_number++;
/* ---- PTS handling: validate monotonicity, or synthesize one ---- */
904 if (pts != AV_NOPTS_VALUE) {
905 if (s->user_specified_pts != AV_NOPTS_VALUE) {
907 int64_t last = s->user_specified_pts;
910 av_log(s->avctx, AV_LOG_ERROR,
911 "Error, Invalid timestamp=%"PRId64", "
912 "last=%"PRId64"\n", pts, s->user_specified_pts);
916 if (!s->low_delay && display_picture_number == 1)
917 s->dts_delta = time - last;
919 s->user_specified_pts = pts;
921 if (s->user_specified_pts != AV_NOPTS_VALUE) {
/* no PTS given: guess by incrementing the previous one */
922 s->user_specified_pts =
923 pts = s->user_specified_pts + 1;
924 av_log(s->avctx, AV_LOG_INFO,
925 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
928 pts = display_picture_number;
/* NOTE(review): trailing ';' makes this `if` a no-op — looks like a
 * transcription/merge artifact; confirm against upstream, which gates
 * the direct-reference path on pic_arg->buf[0] and linesizes. */
934 if (!pic_arg->buf[0]);
936 if (pic_arg->linesize[0] != s->linesize)
938 if (pic_arg->linesize[1] != s->uvlinesize)
940 if (pic_arg->linesize[2] != s->uvlinesize)
943 av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
944 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* direct path: reference the user's buffer without copying */
947 i = ff_find_unused_picture(s, 1);
951 pic = &s->picture[i];
954 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
956 if (ff_alloc_picture(s, pic, 1) < 0) {
/* copy path: allocate an internal picture and memcpy plane data */
960 i = ff_find_unused_picture(s, 0);
964 pic = &s->picture[i];
967 if (ff_alloc_picture(s, pic, 0) < 0) {
971 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
972 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
973 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
976 int h_chroma_shift, v_chroma_shift;
977 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
981 for (i = 0; i < 3; i++) {
982 int src_stride = pic_arg->linesize[i];
983 int dst_stride = i ? s->uvlinesize : s->linesize;
984 int h_shift = i ? h_chroma_shift : 0;
985 int v_shift = i ? v_chroma_shift : 0;
986 int w = s->width >> h_shift;
987 int h = s->height >> v_shift;
988 uint8_t *src = pic_arg->data[i];
989 uint8_t *dst = pic->f.data[i];
991 if (!s->avctx->rc_buffer_size)
992 dst += INPLACE_OFFSET;
994 if (src_stride == dst_stride)
995 memcpy(dst, src, src_stride * h);
1006 ret = av_frame_copy_props(&pic->f, pic_arg);
1010 pic->f.display_picture_number = display_picture_number;
1011 pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
1014 /* shift buffer entries */
1015 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1016 s->input_picture[i - 1] = s->input_picture[i];
1018 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to ref to be skipped.
 * Compares all three planes in 8x8 blocks with the configured
 * frame_skip_cmp function, accumulating per frame_skip_exp:
 *   0 = max, 1 = sum abs, 2 = sum squares, 3/4 = higher powers (64-bit). */
1023 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1027 int64_t score64 = 0;
1029 for (plane = 0; plane < 3; plane++) {
1030 const int stride = p->f.linesize[plane];
1031 const int bw = plane ? 1 : 2;
1032 for (y = 0; y < s->mb_height * bw; y++) {
1033 for (x = 0; x < s->mb_width * bw; x++) {
/* non-shared pictures carry the INPLACE edge offset */
1034 int off = p->shared ? 0 : 16;
1035 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1036 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1037 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1039 switch (s->avctx->frame_skip_exp) {
1040 case 0: score = FFMAX(score, v); break;
1041 case 1: score += FFABS(v); break;
1042 case 2: score += v * v; break;
1043 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1044 case 4: score64 += v * v * (int64_t)(v * v); break;
/* skip if below the absolute threshold or the lambda-scaled factor */
1053 if (score64 < s->avctx->frame_skip_threshold)
1055 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with a scratch encoder context and return its coded
 * size; helper for estimate_best_b_count()'s trial encodes. */
1060 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1062 AVPacket pkt = { 0 };
1063 int ret, got_output;
1065 av_init_packet(&pkt);
1066 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1071 av_free_packet(&pkt);
/* b_frame_strategy 2: trial-encode the queued input pictures at reduced
 * resolution (brd_scale) with every candidate B-frame count and return
 * the count with the lowest rate-distortion cost.
 * Returns -1 if no candidate could be evaluated. */
1075 static int estimate_best_b_count(MpegEncContext *s)
1077 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1078 AVCodecContext *c = avcodec_alloc_context3(NULL);
1079 const int scale = s->avctx->brd_scale;
1080 int i, j, out_size, p_lambda, b_lambda, lambda2;
1081 int64_t best_rd = INT64_MAX;
1082 int best_b_count = -1;
1084 assert(scale >= 0 && scale <= 3);
1087 //s->next_picture_ptr->quality;
1088 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1089 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1090 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1091 if (!b_lambda) // FIXME we should do this somewhere else
1092 b_lambda = p_lambda;
1093 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* configure the scratch encoder to mirror the real one, downscaled */
1096 c->width = s->width >> scale;
1097 c->height = s->height >> scale;
1098 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1099 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1100 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1101 c->mb_decision = s->avctx->mb_decision;
1102 c->me_cmp = s->avctx->me_cmp;
1103 c->mb_cmp = s->avctx->mb_cmp;
1104 c->me_sub_cmp = s->avctx->me_sub_cmp;
1105 c->pix_fmt = AV_PIX_FMT_YUV420P;
1106 c->time_base = s->avctx->time_base;
1107 c->max_b_frames = s->max_b_frames;
1109 if (avcodec_open2(c, codec, NULL) < 0)
/* downscale the queued inputs (slot 0 = previous reference frame) */
1112 for (i = 0; i < s->max_b_frames + 2; i++) {
1113 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1114 s->next_picture_ptr;
1116 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1117 pre_input = *pre_input_ptr;
1119 if (!pre_input.shared && i) {
1120 pre_input.f.data[0] += INPLACE_OFFSET;
1121 pre_input.f.data[1] += INPLACE_OFFSET;
1122 pre_input.f.data[2] += INPLACE_OFFSET;
1125 s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
1126 pre_input.f.data[0], pre_input.f.linesize[0],
1127 c->width, c->height);
1128 s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
1129 pre_input.f.data[1], pre_input.f.linesize[1],
1130 c->width >> 1, c->height >> 1);
1131 s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
1132 pre_input.f.data[2], pre_input.f.linesize[2],
1133 c->width >> 1, c->height >> 1);
/* try each candidate B-frame count j and accumulate its RD cost */
1137 for (j = 0; j < s->max_b_frames + 1; j++) {
1140 if (!s->input_picture[j])
1143 c->error[0] = c->error[1] = c->error[2] = 0;
1145 s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1146 s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1148 out_size = encode_frame(c, s->tmp_frames[0]);
1150 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1152 for (i = 0; i < s->max_b_frames + 1; i++) {
/* every (j+1)-th frame (and the last) is a P, the rest are B */
1153 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1155 s->tmp_frames[i + 1]->pict_type = is_p ?
1156 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1157 s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1159 out_size = encode_frame(c, s->tmp_frames[i + 1]);
1161 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1164 /* get the delayed frames */
1166 out_size = encode_frame(c, NULL);
1167 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* distortion term: PSNR error accumulated by the scratch encoder */
1170 rd += c->error[0] + c->error[1] + c->error[2];
1181 return best_b_count;
/* Pick the next picture to encode from the input FIFO.
 * Shifts the reorder queue, decides the coded picture type (I/P/B),
 * applies the configured b_frame_strategy to choose how many B-frames
 * precede the next reference frame, and finally sets up s->new_picture
 * and s->current_picture(_ptr) for the actual encode.
 * NOTE(review): this is a sampled excerpt — original lines are elided
 * between the numbered statements below. */
1184 static int select_input_picture(MpegEncContext *s)
/* Shift the reorder queue down by one slot. */
1188 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1189 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1190 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1192 /* set next picture type & ordering */
1193 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
/* No previous reference or intra-only mode -> force an I-frame. */
1194 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1195 s->next_picture_ptr == NULL || s->intra_only) {
1196 s->reordered_input_picture[0] = s->input_picture[0];
1197 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1198 s->reordered_input_picture[0]->f.coded_picture_number =
1199 s->coded_picture_number++;
/* Optional frame skipping of near-duplicate frames inside a GOP;
 * a skipped frame still advances the VBV model (ff_vbv_update(s, 0)). */
1203 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1204 if (s->picture_in_gop_number < s->gop_size &&
1205 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1206 // FIXME check that the gop check above is +-1 correct
1207 av_frame_unref(&s->input_picture[0]->f);
1210 ff_vbv_update(s, 0);
/* Two-pass mode: picture types come from the rate-control log. */
1216 if (s->flags & CODEC_FLAG_PASS2) {
1217 for (i = 0; i < s->max_b_frames + 1; i++) {
1218 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1220 if (pict_num >= s->rc_context.num_entries)
1222 if (!s->input_picture[i]) {
1223 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1227 s->input_picture[i]->f.pict_type =
1228 s->rc_context.entry[pict_num].new_pict_type;
/* Strategy 0: always use the maximum number of available B-frames. */
1232 if (s->avctx->b_frame_strategy == 0) {
1233 b_frames = s->max_b_frames;
1234 while (b_frames && !s->input_picture[b_frames])
/* Strategy 1: score candidate B-frames (frame difference heuristic)
 * and stop at the first frame whose score exceeds the sensitivity
 * threshold. */
1236 } else if (s->avctx->b_frame_strategy == 1) {
1237 for (i = 1; i < s->max_b_frames + 1; i++) {
1238 if (s->input_picture[i] &&
1239 s->input_picture[i]->b_frame_score == 0) {
1240 s->input_picture[i]->b_frame_score =
1242 s->input_picture[i ]->f.data[0],
1243 s->input_picture[i - 1]->f.data[0],
1247 for (i = 0; i < s->max_b_frames + 1; i++) {
1248 if (s->input_picture[i] == NULL ||
1249 s->input_picture[i]->b_frame_score - 1 >
1250 s->mb_num / s->avctx->b_sensitivity)
1254 b_frames = FFMAX(0, i - 1);
/* Reset scores so the next call re-evaluates fresh frames. */
1257 for (i = 0; i < b_frames + 1; i++) {
1258 s->input_picture[i]->b_frame_score = 0;
/* Strategy 2: exhaustive trial encode to estimate the best count. */
1260 } else if (s->avctx->b_frame_strategy == 2) {
1261 b_frames = estimate_best_b_count(s);
1263 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* Honor any picture type already forced on an input frame. */
1269 for (i = b_frames - 1; i >= 0; i--) {
1270 int type = s->input_picture[i]->f.pict_type;
1271 if (type && type != AV_PICTURE_TYPE_B)
1274 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1275 b_frames == s->max_b_frames) {
1276 av_log(s->avctx, AV_LOG_ERROR,
1277 "warning, too many b frames in a row\n");
/* GOP boundary handling: clamp/force an I-frame at the GOP end. */
1280 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1281 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1282 s->gop_size > s->picture_in_gop_number) {
1283 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1285 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1287 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1291 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1292 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* Reorder: reference frame first, then its preceding B-frames. */
1295 s->reordered_input_picture[0] = s->input_picture[b_frames];
1296 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1297 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1298 s->reordered_input_picture[0]->f.coded_picture_number =
1299 s->coded_picture_number++;
1300 for (i = 0; i < b_frames; i++) {
1301 s->reordered_input_picture[i + 1] = s->input_picture[i];
1302 s->reordered_input_picture[i + 1]->f.pict_type =
1304 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1305 s->coded_picture_number++;
1310 if (s->reordered_input_picture[0]) {
/* reference==3 marks a frame usable as both fields' reference;
 * B-frames are never referenced (0). */
1311 s->reordered_input_picture[0]->reference =
1312 s->reordered_input_picture[0]->f.pict_type !=
1313 AV_PICTURE_TYPE_B ? 3 : 0;
1315 ff_mpeg_unref_picture(s, &s->new_picture);
1316 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1319 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1320 // input is a shared pix, so we can't modify it -> alloc a new
1321 // one & ensure that the shared one is reusable
1324 int i = ff_find_unused_picture(s, 0);
1327 pic = &s->picture[i];
1329 pic->reference = s->reordered_input_picture[0]->reference;
1330 if (ff_alloc_picture(s, pic, 0) < 0) {
1334 ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1338 /* mark us unused / free shared pic */
1339 av_frame_unref(&s->reordered_input_picture[0]->f);
1340 s->reordered_input_picture[0]->shared = 0;
1342 s->current_picture_ptr = pic;
1344 // input is not a shared pix -> reuse buffer for current_pix
1345 s->current_picture_ptr = s->reordered_input_picture[0];
1346 for (i = 0; i < 4; i++) {
1347 s->new_picture.f.data[i] += INPLACE_OFFSET;
1350 ff_mpeg_unref_picture(s, &s->current_picture);
1351 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1352 s->current_picture_ptr)) < 0)
1355 s->picture_number = s->new_picture.f.display_picture_number;
1357 ff_mpeg_unref_picture(s, &s->new_picture);
/* Top-level per-frame encode entry point (AVCodec.encode2 callback).
 * Loads/selects the input picture, splits the output packet between
 * slice threads, runs the encoder, applies VBV stuffing and the
 * MPEG-1/2 CBR vbv_delay patch-up, and fills pkt pts/dts/flags/size.
 * Returns 0 (or an error) and sets *got_packet when a packet was
 * produced.
 * NOTE(review): sampled excerpt — original lines are elided between
 * the numbered statements. */
1362 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1363 const AVFrame *pic_arg, int *got_packet)
1365 MpegEncContext *s = avctx->priv_data;
1366 int i, stuffing_count, ret;
1367 int context_count = s->slice_context_count;
1369 s->picture_in_gop_number++;
1371 if (load_input_picture(s, pic_arg) < 0)
1374 if (select_input_picture(s) < 0) {
/* Only proceed if select_input_picture() produced a frame to code. */
1379 if (s->new_picture.f.data[0]) {
1381 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* H.263 macroblock-info side data: 12 bytes per MB slot. */
1384 s->mb_info_ptr = av_packet_new_side_data(pkt,
1385 AV_PKT_DATA_H263_MB_INFO,
1386 s->mb_width*s->mb_height*12);
1387 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Divide the packet buffer among slice threads proportionally to the
 * MB rows each thread covers. */
1390 for (i = 0; i < context_count; i++) {
1391 int start_y = s->thread_context[i]->start_mb_y;
1392 int end_y = s->thread_context[i]-> end_mb_y;
1393 int h = s->mb_height;
1394 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1395 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1397 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1400 s->pict_type = s->new_picture.f.pict_type;
1402 ff_MPV_frame_start(s, avctx);
1404 if (encode_picture(s, s->picture_number) < 0)
/* Export per-frame bit statistics to the AVCodecContext. */
1407 avctx->header_bits = s->header_bits;
1408 avctx->mv_bits = s->mv_bits;
1409 avctx->misc_bits = s->misc_bits;
1410 avctx->i_tex_bits = s->i_tex_bits;
1411 avctx->p_tex_bits = s->p_tex_bits;
1412 avctx->i_count = s->i_count;
1413 // FIXME f/b_count in avctx
1414 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1415 avctx->skip_count = s->skip_count;
1417 ff_MPV_frame_end(s);
1419 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1420 ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow recovery: if the frame is too large, raise lambda and
 * re-encode (the elided code loops back); state changed by the first
 * attempt must be undone below. */
1422 if (avctx->rc_buffer_size) {
1423 RateControlContext *rcc = &s->rc_context;
1424 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1426 if (put_bits_count(&s->pb) > max_size &&
1427 s->lambda < s->avctx->lmax) {
1428 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1429 (s->qscale + 1) / s->qscale);
1430 if (s->adaptive_quant) {
1432 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1433 s->lambda_table[i] =
1434 FFMAX(s->lambda_table[i] + 1,
1435 s->lambda_table[i] * (s->qscale + 1) /
1438 s->mb_skipped = 0; // done in MPV_frame_start()
1439 // done in encode_picture() so we must undo it
1440 if (s->pict_type == AV_PICTURE_TYPE_P) {
1441 if (s->flipflop_rounding ||
1442 s->codec_id == AV_CODEC_ID_H263P ||
1443 s->codec_id == AV_CODEC_ID_MPEG4)
1444 s->no_rounding ^= 1;
1446 if (s->pict_type != AV_PICTURE_TYPE_B) {
1447 s->time_base = s->last_time_base;
1448 s->last_non_b_time = s->time - s->pp_time;
/* Rewind all thread bitstream writers for the retry. */
1450 for (i = 0; i < context_count; i++) {
1451 PutBitContext *pb = &s->thread_context[i]->pb;
1452 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1457 assert(s->avctx->rc_max_rate);
1460 if (s->flags & CODEC_FLAG_PASS1)
1461 ff_write_pass1_stats(s);
/* Accumulate per-plane error (PSNR) stats. */
1463 for (i = 0; i < 4; i++) {
1464 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1465 avctx->error[i] += s->current_picture_ptr->f.error[i];
1468 if (s->flags & CODEC_FLAG_PASS1)
1469 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1470 avctx->i_tex_bits + avctx->p_tex_bits ==
1471 put_bits_count(&s->pb));
1472 flush_put_bits(&s->pb);
1473 s->frame_bits = put_bits_count(&s->pb);
/* VBV stuffing: pad the frame so the buffer model does not underflow. */
1475 stuffing_count = ff_vbv_update(s, s->frame_bits);
1476 if (stuffing_count) {
1477 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1478 stuffing_count + 50) {
1479 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1483 switch (s->codec_id) {
1484 case AV_CODEC_ID_MPEG1VIDEO:
1485 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2 stuffing: zero bytes before the next start code. */
1486 while (stuffing_count--) {
1487 put_bits(&s->pb, 8, 0);
1490 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: a 32-bit stuffing start code (0x000001C3)
 * followed by 0xFF filler bytes. */
1491 put_bits(&s->pb, 16, 0);
1492 put_bits(&s->pb, 16, 0x1C3);
1493 stuffing_count -= 4;
1494 while (stuffing_count--) {
1495 put_bits(&s->pb, 8, 0xFF);
1499 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1501 flush_put_bits(&s->pb);
1502 s->frame_bits = put_bits_count(&s->pb);
1505 /* update mpeg1/2 vbv_delay for CBR */
1506 if (s->avctx->rc_max_rate &&
1507 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1508 s->out_format == FMT_MPEG1 &&
/* Only when the buffer size fits in the 16-bit vbv_delay field
 * expressed in 90 kHz ticks. */
1509 90000LL * (avctx->rc_buffer_size - 1) <=
1510 s->avctx->rc_max_rate * 0xFFFFLL) {
1511 int vbv_delay, min_delay;
1512 double inbits = s->avctx->rc_max_rate *
1513 av_q2d(s->avctx->time_base);
1514 int minbits = s->frame_bits - 8 *
1515 (s->vbv_delay_ptr - s->pb.buf - 1);
1516 double bits = s->rc_context.buffer_index + minbits - inbits;
1519 av_log(s->avctx, AV_LOG_ERROR,
1520 "Internal error, negative bits\n");
1522 assert(s->repeat_first_field == 0);
1524 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1525 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1526 s->avctx->rc_max_rate;
1528 vbv_delay = FFMAX(vbv_delay, min_delay);
1530 assert(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay value in place in the already-written
 * picture header (it straddles three bytes). */
1532 s->vbv_delay_ptr[0] &= 0xF8;
1533 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1534 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1535 s->vbv_delay_ptr[2] &= 0x07;
1536 s->vbv_delay_ptr[2] |= vbv_delay << 3;
/* AVCodecContext.vbv_delay is in 27 MHz units (90 kHz * 300). */
1537 avctx->vbv_delay = vbv_delay * 300;
1539 s->total_bits += s->frame_bits;
1540 avctx->frame_bits = s->frame_bits;
/* Timestamps: with B-frames (low_delay off) dts lags pts by the
 * reorder delay. */
1542 pkt->pts = s->current_picture.f.pts;
1543 if (!s->low_delay) {
1544 if (!s->current_picture.f.coded_picture_number)
1545 pkt->dts = pkt->pts - s->dts_delta;
1547 pkt->dts = s->reordered_pts;
1548 s->reordered_pts = s->input_picture[0]->f.pts;
1550 pkt->dts = pkt->pts;
1551 if (s->current_picture.f.key_frame)
1552 pkt->flags |= AV_PKT_FLAG_KEY;
1554 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1558 assert((s->frame_bits & 7) == 0);
1560 pkt->size = s->frame_bits / 8;
1561 *got_packet = !!pkt->size;
/* Zero out block n entirely when it contains only a few small
 * coefficients whose weighted "score" (tab[] weights by scan position)
 * stays below the threshold — cheaper to skip the block than code it.
 * A negative threshold means the DC coefficient is preserved (skip_dc).
 * NOTE(review): sampled excerpt — some original lines are elided. */
1565 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1566 int n, int threshold)
/* Per-scan-position weights: early (low-frequency) coefficients count
 * more toward keeping the block. */
1568 static const char tab[64] = {
1569 3, 2, 2, 1, 1, 1, 1, 1,
1570 1, 1, 1, 1, 1, 1, 1, 1,
1571 1, 1, 1, 1, 1, 1, 1, 1,
1572 0, 0, 0, 0, 0, 0, 0, 0,
1573 0, 0, 0, 0, 0, 0, 0, 0,
1574 0, 0, 0, 0, 0, 0, 0, 0,
1575 0, 0, 0, 0, 0, 0, 0, 0,
1576 0, 0, 0, 0, 0, 0, 0, 0
1581 int16_t *block = s->block[n];
1582 const int last_index = s->block_last_index[n];
1585 if (threshold < 0) {
1587 threshold = -threshold;
1591 /* Are all we could set to zero already zero? */
1592 if (last_index <= skip_dc - 1)
/* Score the block; coefficients with |level| > 1 force keeping it. */
1595 for (i = 0; i <= last_index; i++) {
1596 const int j = s->intra_scantable.permutated[i];
1597 const int level = FFABS(block[j]);
1599 if (skip_dc && i == 0)
1603 } else if (level > 1) {
1609 if (score >= threshold)
/* Clear every coefficient after the (optionally kept) DC. */
1611 for (i = skip_dc; i <= last_index; i++) {
1612 const int j = s->intra_scantable.permutated[i];
1616 s->block_last_index[n] = 0;
1618 s->block_last_index[n] = -1;
/* Clamp quantized coefficients to the codec's legal range
 * [min_qcoeff, max_qcoeff]; the intra DC coefficient is exempt.
 * Logs once per MB if clipping occurred and simple MB decision is
 * active (with RD decision the clipping is accounted for elsewhere).
 * NOTE(review): sampled excerpt — some original lines are elided. */
1621 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1625 const int maxlevel = s->max_qcoeff;
1626 const int minlevel = s->min_qcoeff;
1630 i = 1; // skip clipping of intra dc
1634 for (; i <= last_index; i++) {
1635 const int j = s->intra_scantable.permutated[i];
1636 int level = block[j];
1638 if (level > maxlevel) {
1641 } else if (level < minlevel) {
1649 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1650 av_log(s->avctx, AV_LOG_INFO,
1651 "warning, clipping %d dct coefficients to %d..%d\n",
1652 overflow, minlevel, maxlevel);
/* Compute a per-pixel visual weight for an 8x8 block: for each pixel,
 * measure local variance over its 3x3 neighborhood (clamped at block
 * edges) and store 36*stddev/count — used by the noise-shaping
 * quantizer to hide quantization error in busy areas.
 * NOTE(review): sampled excerpt — some original lines are elided. */
1655 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1659 for (y = 0; y < 8; y++) {
1660 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood, clipped to the 8x8 block bounds. */
1666 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1667 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1668 int v = ptr[x2 + y2 * stride];
1674 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/* Encode one macroblock: adaptive-quant handling, pixel fetch (with
 * edge emulation), interlaced-DCT decision, forward DCT + quantization
 * (intra path uses get_pixels, inter path motion-compensates and uses
 * diff_pixels), optional noise shaping and coefficient elimination,
 * and finally the per-codec entropy coding.
 * mb_block_height/mb_block_count select 4:2:0 (8,6) vs 4:2:2 (16,8);
 * always_inline so the two encode_mb() instantiations specialize.
 * NOTE(review): sampled excerpt — original lines are elided between
 * the numbered statements. */
1679 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1680 int motion_x, int motion_y,
1681 int mb_block_height,
1684 int16_t weight[8][64];
1685 int16_t orig[8][64];
1686 const int mb_x = s->mb_x;
1687 const int mb_y = s->mb_y;
1690 int dct_offset = s->linesize * 8; // default for progressive frames
1691 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1692 ptrdiff_t wrap_y, wrap_c;
1694 for (i = 0; i < mb_block_count; i++)
1695 skip_dct[i] = s->skipdct;
/* Adaptive quantization: pick this MB's lambda/qscale from the table
 * filled by the rate control / masking pass. */
1697 if (s->adaptive_quant) {
1698 const int last_qp = s->qscale;
1699 const int mb_xy = mb_x + mb_y * s->mb_stride;
1701 s->lambda = s->lambda_table[mb_xy];
1704 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1705 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1706 s->dquant = s->qscale - last_qp;
1708 if (s->out_format == FMT_H263) {
/* H.263 DQUANT is limited to +-2. */
1709 s->dquant = av_clip(s->dquant, -2, 2);
1711 if (s->codec_id == AV_CODEC_ID_MPEG4) {
/* MPEG-4: direct-mode B MBs and 8x8 MV MBs cannot carry a
 * quantizer change. */
1713 if (s->pict_type == AV_PICTURE_TYPE_B) {
1714 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1717 if (s->mv_type == MV_TYPE_8X8)
1723 ff_set_qscale(s, last_qp + s->dquant);
1724 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1725 ff_set_qscale(s, s->qscale + s->dquant);
1727 wrap_y = s->linesize;
1728 wrap_c = s->uvlinesize;
/* Source pixel pointers for this MB in the frame being encoded. */
1729 ptr_y = s->new_picture.f.data[0] +
1730 (mb_y * 16 * wrap_y) + mb_x * 16;
1731 ptr_cb = s->new_picture.f.data[1] +
1732 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1733 ptr_cr = s->new_picture.f.data[2] +
1734 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB sticks out past the frame edge -> copy into the emu buffer with
 * edge replication. */
1736 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1737 uint8_t *ebuf = s->edge_emu_buffer + 32;
1738 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1740 16, 16, mb_x * 16, mb_y * 16,
1741 s->width, s->height);
1743 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1745 8, mb_block_height, mb_x * 8, mb_y * 8,
1746 s->width >> 1, s->height >> 1);
1747 ptr_cb = ebuf + 18 * wrap_y;
1748 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
1750 8, mb_block_height, mb_x * 8, mb_y * 8,
1751 s->width >> 1, s->height >> 1);
1752 ptr_cr = ebuf + 18 * wrap_y + 8;
/* Intra path: frame/field DCT decision on the raw pixels. */
1756 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1757 int progressive_score, interlaced_score;
1759 s->interlaced_dct = 0;
1760 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1762 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1763 NULL, wrap_y, 8) - 400;
1765 if (progressive_score > 0) {
1766 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1767 NULL, wrap_y * 2, 8) +
1768 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1769 NULL, wrap_y * 2, 8);
1770 if (progressive_score > interlaced_score) {
1771 s->interlaced_dct = 1;
/* Field DCT: the "lower" luma blocks start one line down. */
1773 dct_offset = wrap_y;
1775 if (s->chroma_format == CHROMA_422)
/* Intra: fetch the four luma and the chroma 8x8 blocks. */
1781 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1782 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1783 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1784 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1786 if (s->flags & CODEC_FLAG_GRAY) {
1790 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1791 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1792 if (!s->chroma_y_shift) { /* 422 */
1793 s->dsp.get_pixels(s->block[6],
1794 ptr_cb + (dct_offset >> 1), wrap_c);
1795 s->dsp.get_pixels(s->block[7],
1796 ptr_cr + (dct_offset >> 1), wrap_c);
/* Inter path: motion-compensate the prediction into s->dest, then DCT
 * the residual. */
1800 op_pixels_func (*op_pix)[4];
1801 qpel_mc_func (*op_qpix)[16];
1802 uint8_t *dest_y, *dest_cb, *dest_cr;
1804 dest_y = s->dest[0];
1805 dest_cb = s->dest[1];
1806 dest_cr = s->dest[2];
1808 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1809 op_pix = s->hdsp.put_pixels_tab;
1810 op_qpix = s->dsp.put_qpel_pixels_tab;
1812 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1813 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1816 if (s->mv_dir & MV_DIR_FORWARD) {
1817 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1818 s->last_picture.f.data,
/* After the forward prediction, backward MC averages into it. */
1820 op_pix = s->hdsp.avg_pixels_tab;
1821 op_qpix = s->dsp.avg_qpel_pixels_tab;
1823 if (s->mv_dir & MV_DIR_BACKWARD) {
1824 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1825 s->next_picture.f.data,
/* Inter frame/field DCT decision, on the residual this time. */
1829 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1830 int progressive_score, interlaced_score;
1832 s->interlaced_dct = 0;
1833 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1836 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1837 ptr_y + wrap_y * 8, wrap_y,
1840 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1841 progressive_score -= 400;
1843 if (progressive_score > 0) {
1844 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1847 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1851 if (progressive_score > interlaced_score) {
1852 s->interlaced_dct = 1;
1854 dct_offset = wrap_y;
1856 if (s->chroma_format == CHROMA_422)
/* Residual = source - prediction, per 8x8 block. */
1862 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1863 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1864 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1865 dest_y + dct_offset, wrap_y);
1866 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1867 dest_y + dct_offset + 8, wrap_y);
1869 if (s->flags & CODEC_FLAG_GRAY) {
1873 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1874 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1875 if (!s->chroma_y_shift) { /* 422 */
1876 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1877 dest_cb + (dct_offset >> 1), wrap_c);
1878 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1879 dest_cr + (dct_offset >> 1), wrap_c);
1882 /* pre quantization */
/* If motion-compensated variance is already tiny, cheaply check each
 * block's SAD against ~20*qscale and skip its DCT entirely. */
1883 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1884 2 * s->qscale * s->qscale) {
1886 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1887 wrap_y, 8) < 20 * s->qscale)
1889 if (s->dsp.sad[1](NULL, ptr_y + 8,
1890 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1892 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1893 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1895 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1896 dest_y + dct_offset + 8,
1897 wrap_y, 8) < 20 * s->qscale)
1899 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1900 wrap_c, 8) < 20 * s->qscale)
1902 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1903 wrap_c, 8) < 20 * s->qscale)
1905 if (!s->chroma_y_shift) { /* 422 */
1906 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1907 dest_cb + (dct_offset >> 1),
1908 wrap_c, 8) < 20 * s->qscale)
1910 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1911 dest_cr + (dct_offset >> 1),
1912 wrap_c, 8) < 20 * s->qscale)
/* Noise shaping: remember visual weights and original coefficients so
 * dct_quantize_refine() can redistribute quantization error. */
1918 if (s->quantizer_noise_shaping) {
1920 get_visual_weight(weight[0], ptr_y , wrap_y);
1922 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1924 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1926 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1928 get_visual_weight(weight[4], ptr_cb , wrap_c);
1930 get_visual_weight(weight[5], ptr_cr , wrap_c);
1931 if (!s->chroma_y_shift) { /* 422 */
1933 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1936 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1939 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1942 /* DCT & quantize */
1943 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1945 for (i = 0; i < mb_block_count; i++) {
1948 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1949 // FIXME we could decide to change to quantizer instead of
1951 // JS: I don't think that would be a good idea it could lower
1952 // quality instead of improve it. Just INTRADC clipping
1953 // deserves changes in quantizer
1955 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1957 s->block_last_index[i] = -1;
1959 if (s->quantizer_noise_shaping) {
1960 for (i = 0; i < mb_block_count; i++) {
1962 s->block_last_index[i] =
1963 dct_quantize_refine(s, s->block[i], weight[i],
1964 orig[i], i, s->qscale);
/* Single-coefficient elimination for cheap inter blocks. */
1969 if (s->luma_elim_threshold && !s->mb_intra)
1970 for (i = 0; i < 4; i++)
1971 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1972 if (s->chroma_elim_threshold && !s->mb_intra)
1973 for (i = 4; i < mb_block_count; i++)
1974 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1976 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1977 for (i = 0; i < mb_block_count; i++) {
1978 if (s->block_last_index[i] == -1)
1979 s->coded_score[i] = INT_MAX / 256;
/* Gray mode: force chroma blocks to a neutral DC-only value. */
1984 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1985 s->block_last_index[4] =
1986 s->block_last_index[5] = 0;
1988 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1991 // non c quantize code returns incorrect block_last_index FIXME
1992 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1993 for (i = 0; i < mb_block_count; i++) {
1995 if (s->block_last_index[i] > 0) {
1996 for (j = 63; j > 0; j--) {
1997 if (s->block[i][s->intra_scantable.permutated[j]])
2000 s->block_last_index[i] = j;
2005 /* huffman encode */
2006 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2007 case AV_CODEC_ID_MPEG1VIDEO:
2008 case AV_CODEC_ID_MPEG2VIDEO:
2009 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2010 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2012 case AV_CODEC_ID_MPEG4:
2013 if (CONFIG_MPEG4_ENCODER)
2014 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2016 case AV_CODEC_ID_MSMPEG4V2:
2017 case AV_CODEC_ID_MSMPEG4V3:
2018 case AV_CODEC_ID_WMV1:
2019 if (CONFIG_MSMPEG4_ENCODER)
2020 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2022 case AV_CODEC_ID_WMV2:
2023 if (CONFIG_WMV2_ENCODER)
2024 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2026 case AV_CODEC_ID_H261:
2027 if (CONFIG_H261_ENCODER)
2028 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2030 case AV_CODEC_ID_H263:
2031 case AV_CODEC_ID_H263P:
2032 case AV_CODEC_ID_FLV1:
2033 case AV_CODEC_ID_RV10:
2034 case AV_CODEC_ID_RV20:
2035 if (CONFIG_H263_ENCODER)
2036 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2038 case AV_CODEC_ID_MJPEG:
2039 if (CONFIG_MJPEG_ENCODER)
2040 ff_mjpeg_encode_mb(s, s->block);
/* Dispatch to encode_mb_internal() with chroma geometry constants:
 * 4:2:0 uses 8-line chroma blocks / 6 blocks per MB, otherwise
 * 16-line chroma / 8 blocks (4:2:2). Inlining specializes each call. */
2047 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2049 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2050 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/* Snapshot encoder state from s into d before a trial MB encode, so a
 * rejected candidate can be rolled back; mirror of
 * copy_context_after_encode().
 * NOTE(review): sampled excerpt — some original lines are elided. */
2053 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2056 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2059 d->mb_skip_run= s->mb_skip_run;
2061 d->last_dc[i] = s->last_dc[i];
/* Bit-accounting statistics. */
2064 d->mv_bits= s->mv_bits;
2065 d->i_tex_bits= s->i_tex_bits;
2066 d->p_tex_bits= s->p_tex_bits;
2067 d->i_count= s->i_count;
2068 d->f_count= s->f_count;
2069 d->b_count= s->b_count;
2070 d->skip_count= s->skip_count;
2071 d->misc_bits= s->misc_bits;
2075 d->qscale= s->qscale;
2076 d->dquant= s->dquant;
2078 d->esc3_level_length= s->esc3_level_length;
/* Copy the state produced by a winning trial MB encode from s into d
 * (the "best" context); superset of copy_context_before_encode() —
 * also carries MVs, MB mode, block_last_index and partitioned
 * bitstream writers.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2081 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2084 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2085 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2088 d->mb_skip_run= s->mb_skip_run;
2090 d->last_dc[i] = s->last_dc[i];
/* Bit-accounting statistics. */
2093 d->mv_bits= s->mv_bits;
2094 d->i_tex_bits= s->i_tex_bits;
2095 d->p_tex_bits= s->p_tex_bits;
2096 d->i_count= s->i_count;
2097 d->f_count= s->f_count;
2098 d->b_count= s->b_count;
2099 d->skip_count= s->skip_count;
2100 d->misc_bits= s->misc_bits;
/* Chosen macroblock mode. */
2102 d->mb_intra= s->mb_intra;
2103 d->mb_skipped= s->mb_skipped;
2104 d->mv_type= s->mv_type;
2105 d->mv_dir= s->mv_dir;
2107 if(s->data_partitioning){
2109 d->tex_pb= s->tex_pb;
2113 d->block_last_index[i]= s->block_last_index[i];
2114 d->interlaced_dct= s->interlaced_dct;
2115 d->qscale= s->qscale;
2117 d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one MB candidate (mode `type`, vector motion_x/y) into
 * a scratch bitstream, score it (bits, or full RD with SSE when
 * mb_decision==FF_MB_DECISION_RD), and keep it in *best if it beats
 * *dmin. pb/pb2/tex_pb are double-buffered so the winner's bits
 * survive the next trial.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2120 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2121 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2122 int *dmin, int *next_block, int motion_x, int motion_y)
2125 uint8_t *dest_backup[3];
2127 copy_context_before_encode(s, backup, type);
/* Write the trial into the scratch half of the double buffer. */
2129 s->block= s->blocks[*next_block];
2130 s->pb= pb[*next_block];
2131 if(s->data_partitioning){
2132 s->pb2 = pb2 [*next_block];
2133 s->tex_pb= tex_pb[*next_block];
/* Redirect reconstruction into the RD scratchpad so the real frame is
 * untouched until a candidate wins. */
2137 memcpy(dest_backup, s->dest, sizeof(s->dest));
2138 s->dest[0] = s->rd_scratchpad;
2139 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2140 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2141 assert(s->linesize >= 32); //FIXME
2144 encode_mb(s, motion_x, motion_y);
2146 score= put_bits_count(&s->pb);
2147 if(s->data_partitioning){
2148 score+= put_bits_count(&s->pb2);
2149 score+= put_bits_count(&s->tex_pb);
/* Full RD: decode the MB and add lambda-weighted distortion. */
2152 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2153 ff_MPV_decode_mb(s, s->block);
2155 score *= s->lambda2;
2156 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2160 memcpy(s->dest, dest_backup, sizeof(s->dest));
2167 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions. Uses the DSP
 * fast paths for the common 16x16 and 8x8 sizes, otherwise falls back
 * to a scalar loop over the squared-difference lookup table.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2171 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* ff_squareTbl is centered at index 256 so negative diffs index left. */
2172 uint32_t *sq = ff_squareTbl + 256;
2177 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2178 else if(w==8 && h==8)
2179 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2183 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion of the current MB: SSE (or NSSE when mb_cmp selects it)
 * between the source picture and the reconstruction in s->dest, summed
 * over Y, Cb and Cr. Edge MBs (w or h clipped below 16) take the
 * generic sse() path.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2192 static int sse_mb(MpegEncContext *s){
2196 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2197 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2200 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2201 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2202 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2203 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2205 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2206 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2207 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* Clipped edge MB: chroma dimensions are half the luma ones (4:2:0). */
2210 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2211 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2212 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: coarse pre-pass motion estimation, scanning MBs
 * bottom-to-top / right-to-left over this thread's MB-row range.
 * `arg` is a MpegEncContext* boxed behind a void* (execute() API).
 * NOTE(review): sampled excerpt — some original lines are elided. */
2215 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2216 MpegEncContext *s= *(void**)arg;
2220 s->me.dia_size= s->avctx->pre_dia_size;
2221 s->first_slice_line=1;
2222 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2223 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2224 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2226 s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this thread's MB
 * rows; dispatches to B- or P-frame estimation per MB and maintains
 * the block indices used by the ME code.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2234 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2235 MpegEncContext *s= *(void**)arg;
2237 ff_check_alignment();
2239 s->me.dia_size= s->avctx->dia_size;
2240 s->first_slice_line=1;
2241 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2242 s->mb_x=0; //for block init below
2243 ff_init_block_index(s);
2244 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* Advance the four luma block indices one MB to the right. */
2245 s->block_index[0]+=2;
2246 s->block_index[1]+=2;
2247 s->block_index[2]+=2;
2248 s->block_index[3]+=2;
2250 /* compute motion vector & mb_type and store in context */
2251 if(s->pict_type==AV_PICTURE_TYPE_B)
2252 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2254 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2256 s->first_slice_line=0;
/* Slice-thread worker: per-MB luma variance and mean for the rate
 * control / scene analysis. Variance is computed from pix_norm1 (sum
 * of squares) minus the squared mean, with rounding constants.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2261 static int mb_var_thread(AVCodecContext *c, void *arg){
2262 MpegEncContext *s= *(void**)arg;
2265 ff_check_alignment();
2267 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2268 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2271 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2273 int sum = s->dsp.pix_sum(pix, s->linesize);
/* var = E[x^2] - E[x]^2 over the 256-pixel MB (>>8 = /256). */
2275 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2277 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2278 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2279 s->me.mb_var_sum_temp += varc;
/* Finish the current slice's bitstream: merge MPEG-4 partitions and
 * add stuffing (MPEG-4/MJPEG), byte-align and flush, and account the
 * alignment bits as misc_bits for pass-1 statistics.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2285 static void write_slice_end(MpegEncContext *s){
2286 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2287 if(s->partitioned_frame){
2288 ff_mpeg4_merge_partitions(s);
2291 ff_mpeg4_stuffing(&s->pb);
2292 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2293 ff_mjpeg_encode_stuffing(&s->pb);
2296 avpriv_align_put_bits(&s->pb);
2297 flush_put_bits(&s->pb);
2299 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2300 s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte AV_PKT_DATA_H263_MB_INFO record for the current
 * MB: bit offset, qscale, GOB number, MB address, and the H.263 motion
 * vector predictors (4MV second pair left as zero).
 * NOTE(review): sampled excerpt — some original lines are elided. */
2303 static void write_mb_info(MpegEncContext *s)
/* ptr points at the last (just reserved) 12-byte slot. */
2305 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2306 int offset = put_bits_count(&s->pb);
2307 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2308 int gobn = s->mb_y / s->gob_index;
2310 if (CONFIG_H263_ENCODER)
2311 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2312 bytestream_put_le32(&ptr, offset);
2313 bytestream_put_byte(&ptr, s->qscale);
2314 bytestream_put_byte(&ptr, gobn);
2315 bytestream_put_le16(&ptr, mba);
2316 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2317 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2318 /* 4MV not implemented */
2319 bytestream_put_byte(&ptr, 0); /* hmv2 */
2320 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track H.263 MB-info emission: reserve a new 12-byte info slot once
 * s->mb_info bytes of bitstream have accumulated since the last one;
 * with `startcode` set, record the post-startcode position instead.
 * NOTE(review): sampled excerpt — some original lines are elided. */
2323 static void update_mb_info(MpegEncContext *s, int startcode)
2327 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2328 s->mb_info_size += 12;
2329 s->prev_mb_info = s->last_mb_info;
2332 s->prev_mb_info = put_bits_count(&s->pb)/8;
2333 /* This might have incremented mb_info_size above, and we return
2334 * without actually writing any info into that slot yet. But in that
2335 * case, this will be called again after the start code has been
2336 * written, and the MB info will actually be written then. */
2340 s->last_mb_info = put_bits_count(&s->pb)/8;
2341 if (!s->mb_info_size)
2342 s->mb_info_size += 12;
2346 static int encode_thread(AVCodecContext *c, void *arg){
2347 MpegEncContext *s= *(void**)arg;
2348 int mb_x, mb_y, pdif = 0;
2349 int chr_h= 16>>s->chroma_y_shift;
2351 MpegEncContext best_s, backup_s;
2352 uint8_t bit_buf[2][MAX_MB_BYTES];
2353 uint8_t bit_buf2[2][MAX_MB_BYTES];
2354 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2355 PutBitContext pb[2], pb2[2], tex_pb[2];
2357 ff_check_alignment();
2360 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2361 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2362 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2365 s->last_bits= put_bits_count(&s->pb);
2376 /* init last dc values */
2377 /* note: quant matrix value (8) is implied here */
2378 s->last_dc[i] = 128 << s->intra_dc_precision;
2380 s->current_picture.f.error[i] = 0;
2383 memset(s->last_mv, 0, sizeof(s->last_mv));
2387 switch(s->codec_id){
2388 case AV_CODEC_ID_H263:
2389 case AV_CODEC_ID_H263P:
2390 case AV_CODEC_ID_FLV1:
2391 if (CONFIG_H263_ENCODER)
2392 s->gob_index = ff_h263_get_gob_height(s);
2394 case AV_CODEC_ID_MPEG4:
2395 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2396 ff_mpeg4_init_partitions(s);
2402 s->first_slice_line = 1;
2403 s->ptr_lastgob = s->pb.buf;
2404 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2408 ff_set_qscale(s, s->qscale);
2409 ff_init_block_index(s);
2411 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2412 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2413 int mb_type= s->mb_type[xy];
2418 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2419 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2422 if(s->data_partitioning){
2423 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2424 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2425 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2431 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2432 ff_update_block_index(s);
2434 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2435 ff_h261_reorder_mb_index(s);
2436 xy= s->mb_y*s->mb_stride + s->mb_x;
2437 mb_type= s->mb_type[xy];
2440 /* write gob / video packet header */
2442 int current_packet_size, is_gob_start;
2444 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2446 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2448 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2450 switch(s->codec_id){
2451 case AV_CODEC_ID_H263:
2452 case AV_CODEC_ID_H263P:
2453 if(!s->h263_slice_structured)
2454 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2456 case AV_CODEC_ID_MPEG2VIDEO:
2457 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2458 case AV_CODEC_ID_MPEG1VIDEO:
2459 if(s->mb_skip_run) is_gob_start=0;
2464 if(s->start_mb_y != mb_y || mb_x!=0){
2467 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2468 ff_mpeg4_init_partitions(s);
2472 assert((put_bits_count(&s->pb)&7) == 0);
2473 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2475 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2476 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2477 int d = 100 / s->error_rate;
2479 current_packet_size=0;
2480 s->pb.buf_ptr= s->ptr_lastgob;
2481 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2485 if (s->avctx->rtp_callback){
2486 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2487 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2489 update_mb_info(s, 1);
2491 switch(s->codec_id){
2492 case AV_CODEC_ID_MPEG4:
2493 if (CONFIG_MPEG4_ENCODER) {
2494 ff_mpeg4_encode_video_packet_header(s);
2495 ff_mpeg4_clean_buffers(s);
2498 case AV_CODEC_ID_MPEG1VIDEO:
2499 case AV_CODEC_ID_MPEG2VIDEO:
2500 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2501 ff_mpeg1_encode_slice_header(s);
2502 ff_mpeg1_clean_buffers(s);
2505 case AV_CODEC_ID_H263:
2506 case AV_CODEC_ID_H263P:
2507 if (CONFIG_H263_ENCODER)
2508 ff_h263_encode_gob_header(s, mb_y);
2512 if(s->flags&CODEC_FLAG_PASS1){
2513 int bits= put_bits_count(&s->pb);
2514 s->misc_bits+= bits - s->last_bits;
2518 s->ptr_lastgob += current_packet_size;
2519 s->first_slice_line=1;
2520 s->resync_mb_x=mb_x;
2521 s->resync_mb_y=mb_y;
2525 if( (s->resync_mb_x == s->mb_x)
2526 && s->resync_mb_y+1 == s->mb_y){
2527 s->first_slice_line=0;
2531 s->dquant=0; //only for QP_RD
2533 update_mb_info(s, 0);
2535 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2537 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2539 copy_context_before_encode(&backup_s, s, -1);
2541 best_s.data_partitioning= s->data_partitioning;
2542 best_s.partitioned_frame= s->partitioned_frame;
2543 if(s->data_partitioning){
2544 backup_s.pb2= s->pb2;
2545 backup_s.tex_pb= s->tex_pb;
2548 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2549 s->mv_dir = MV_DIR_FORWARD;
2550 s->mv_type = MV_TYPE_16X16;
2552 s->mv[0][0][0] = s->p_mv_table[xy][0];
2553 s->mv[0][0][1] = s->p_mv_table[xy][1];
2554 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2555 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2557 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2558 s->mv_dir = MV_DIR_FORWARD;
2559 s->mv_type = MV_TYPE_FIELD;
2562 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2563 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2564 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2566 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2567 &dmin, &next_block, 0, 0);
2569 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2570 s->mv_dir = MV_DIR_FORWARD;
2571 s->mv_type = MV_TYPE_16X16;
2575 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2576 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2578 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2579 s->mv_dir = MV_DIR_FORWARD;
2580 s->mv_type = MV_TYPE_8X8;
2583 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2584 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2586 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2587 &dmin, &next_block, 0, 0);
2589 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2590 s->mv_dir = MV_DIR_FORWARD;
2591 s->mv_type = MV_TYPE_16X16;
2593 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2594 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2595 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2596 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2598 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2599 s->mv_dir = MV_DIR_BACKWARD;
2600 s->mv_type = MV_TYPE_16X16;
2602 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2603 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2604 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2605 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2607 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2608 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2609 s->mv_type = MV_TYPE_16X16;
2611 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2612 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2613 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2614 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2615 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2616 &dmin, &next_block, 0, 0);
2618 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2619 s->mv_dir = MV_DIR_FORWARD;
2620 s->mv_type = MV_TYPE_FIELD;
2623 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2624 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2625 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2627 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2628 &dmin, &next_block, 0, 0);
2630 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2631 s->mv_dir = MV_DIR_BACKWARD;
2632 s->mv_type = MV_TYPE_FIELD;
2635 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2636 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2637 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2639 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2640 &dmin, &next_block, 0, 0);
2642 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2643 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2644 s->mv_type = MV_TYPE_FIELD;
2646 for(dir=0; dir<2; dir++){
2648 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2649 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2650 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2653 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2654 &dmin, &next_block, 0, 0);
2656 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2658 s->mv_type = MV_TYPE_16X16;
2662 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2663 &dmin, &next_block, 0, 0);
2664 if(s->h263_pred || s->h263_aic){
2666 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2668 ff_clean_intra_table_entries(s); //old mode?
2672 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2673 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2674 const int last_qp= backup_s.qscale;
2677 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2678 static const int dquant_tab[4]={-1,1,-2,2};
2680 assert(backup_s.dquant == 0);
2683 s->mv_dir= best_s.mv_dir;
2684 s->mv_type = MV_TYPE_16X16;
2685 s->mb_intra= best_s.mb_intra;
2686 s->mv[0][0][0] = best_s.mv[0][0][0];
2687 s->mv[0][0][1] = best_s.mv[0][0][1];
2688 s->mv[1][0][0] = best_s.mv[1][0][0];
2689 s->mv[1][0][1] = best_s.mv[1][0][1];
2691 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2692 for(; qpi<4; qpi++){
2693 int dquant= dquant_tab[qpi];
2694 qp= last_qp + dquant;
2695 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2697 backup_s.dquant= dquant;
2698 if(s->mb_intra && s->dc_val[0]){
2700 dc[i]= s->dc_val[0][ s->block_index[i] ];
2701 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2705 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2706 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2707 if(best_s.qscale != qp){
2708 if(s->mb_intra && s->dc_val[0]){
2710 s->dc_val[0][ s->block_index[i] ]= dc[i];
2711 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2718 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2719 int mx= s->b_direct_mv_table[xy][0];
2720 int my= s->b_direct_mv_table[xy][1];
2722 backup_s.dquant = 0;
2723 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2725 ff_mpeg4_set_direct_mv(s, mx, my);
2726 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2727 &dmin, &next_block, mx, my);
2729 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2730 backup_s.dquant = 0;
2731 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2733 ff_mpeg4_set_direct_mv(s, 0, 0);
2734 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2735 &dmin, &next_block, 0, 0);
2737 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2740 coded |= s->block_last_index[i];
2743 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2744 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2745 mx=my=0; //FIXME find the one we actually used
2746 ff_mpeg4_set_direct_mv(s, mx, my);
2747 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2755 s->mv_dir= best_s.mv_dir;
2756 s->mv_type = best_s.mv_type;
2758 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2759 s->mv[0][0][1] = best_s.mv[0][0][1];
2760 s->mv[1][0][0] = best_s.mv[1][0][0];
2761 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2764 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2765 &dmin, &next_block, mx, my);
2770 s->current_picture.qscale_table[xy] = best_s.qscale;
2772 copy_context_after_encode(s, &best_s, -1);
2774 pb_bits_count= put_bits_count(&s->pb);
2775 flush_put_bits(&s->pb);
2776 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2779 if(s->data_partitioning){
2780 pb2_bits_count= put_bits_count(&s->pb2);
2781 flush_put_bits(&s->pb2);
2782 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2783 s->pb2= backup_s.pb2;
2785 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2786 flush_put_bits(&s->tex_pb);
2787 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2788 s->tex_pb= backup_s.tex_pb;
2790 s->last_bits= put_bits_count(&s->pb);
2792 if (CONFIG_H263_ENCODER &&
2793 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2794 ff_h263_update_motion_val(s);
2796 if(next_block==0){ //FIXME 16 vs linesize16
2797 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2798 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2799 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2802 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2803 ff_MPV_decode_mb(s, s->block);
2805 int motion_x = 0, motion_y = 0;
2806 s->mv_type=MV_TYPE_16X16;
2807 // only one MB-Type possible
2810 case CANDIDATE_MB_TYPE_INTRA:
2813 motion_x= s->mv[0][0][0] = 0;
2814 motion_y= s->mv[0][0][1] = 0;
2816 case CANDIDATE_MB_TYPE_INTER:
2817 s->mv_dir = MV_DIR_FORWARD;
2819 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2820 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2822 case CANDIDATE_MB_TYPE_INTER_I:
2823 s->mv_dir = MV_DIR_FORWARD;
2824 s->mv_type = MV_TYPE_FIELD;
2827 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2828 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2829 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2832 case CANDIDATE_MB_TYPE_INTER4V:
2833 s->mv_dir = MV_DIR_FORWARD;
2834 s->mv_type = MV_TYPE_8X8;
2837 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2838 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2841 case CANDIDATE_MB_TYPE_DIRECT:
2842 if (CONFIG_MPEG4_ENCODER) {
2843 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2845 motion_x=s->b_direct_mv_table[xy][0];
2846 motion_y=s->b_direct_mv_table[xy][1];
2847 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2850 case CANDIDATE_MB_TYPE_DIRECT0:
2851 if (CONFIG_MPEG4_ENCODER) {
2852 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2854 ff_mpeg4_set_direct_mv(s, 0, 0);
2857 case CANDIDATE_MB_TYPE_BIDIR:
2858 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2860 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2861 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2862 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2863 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2865 case CANDIDATE_MB_TYPE_BACKWARD:
2866 s->mv_dir = MV_DIR_BACKWARD;
2868 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2869 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2871 case CANDIDATE_MB_TYPE_FORWARD:
2872 s->mv_dir = MV_DIR_FORWARD;
2874 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2875 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2877 case CANDIDATE_MB_TYPE_FORWARD_I:
2878 s->mv_dir = MV_DIR_FORWARD;
2879 s->mv_type = MV_TYPE_FIELD;
2882 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2883 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2884 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2887 case CANDIDATE_MB_TYPE_BACKWARD_I:
2888 s->mv_dir = MV_DIR_BACKWARD;
2889 s->mv_type = MV_TYPE_FIELD;
2892 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2893 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2894 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2897 case CANDIDATE_MB_TYPE_BIDIR_I:
2898 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2899 s->mv_type = MV_TYPE_FIELD;
2901 for(dir=0; dir<2; dir++){
2903 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2904 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2905 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2910 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2913 encode_mb(s, motion_x, motion_y);
2915 // RAL: Update last macroblock type
2916 s->last_mv_dir = s->mv_dir;
2918 if (CONFIG_H263_ENCODER &&
2919 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2920 ff_h263_update_motion_val(s);
2922 ff_MPV_decode_mb(s, s->block);
2925 /* clean the MV table in IPS frames for direct mode in B frames */
2926 if(s->mb_intra /* && I,P,S_TYPE */){
2927 s->p_mv_table[xy][0]=0;
2928 s->p_mv_table[xy][1]=0;
2931 if(s->flags&CODEC_FLAG_PSNR){
2935 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2936 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2938 s->current_picture.f.error[0] += sse(
2939 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2940 s->dest[0], w, h, s->linesize);
2941 s->current_picture.f.error[1] += sse(
2942 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2943 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2944 s->current_picture.f.error[2] += sse(
2945 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2946 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2949 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2950 ff_h263_loop_filter(s);
2952 av_dlog(s->avctx, "MB %d %d bits\n",
2953 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2957 //not beautiful here but we must write it before flushing so it has to be here
2958 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2959 ff_msmpeg4_encode_ext_header(s);
2963 /* Send the last GOB if RTP */
2964 if (s->avctx->rtp_callback) {
2965 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2966 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2967 /* Call the RTP callback to send the last GOB */
2969 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* MERGE(): accumulate a per-slice-context statistic into the main context
 * and zero the source field so a repeated merge cannot double-count it.
 * NOTE(review): multi-statement macro without do{}while(0); safe only while
 * every use site is a stand-alone statement, as all visible uses are. */
2975 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold the motion-estimation statistics gathered by one worker slice
 * context back into the main encoder context (called once per extra
 * context after the ME pass). */
2976 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2977 MERGE(me.scene_change_score);
2978 MERGE(me.mc_mb_var_sum_temp);
2979 MERGE(me.mb_var_sum_temp);
/* Fold per-slice encoding statistics from a worker context back into the
 * main context after the encode pass, then append the worker's bitstream
 * onto the main PutBitContext.
 * NOTE(review): original line numbers are non-contiguous (2987-2994 etc.
 * elided in this excerpt); additional MERGE lines exist in the full file. */
2982 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2985 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2986 MERGE(dct_count[1]);
2995 MERGE(er.error_count);
2996 MERGE(padding_bug_score);
2997 MERGE(current_picture.f.error[0]);
2998 MERGE(current_picture.f.error[1]);
2999 MERGE(current_picture.f.error[2]);
/* noise reduction keeps a running per-coefficient DCT error sum for both
 * the intra [0] and inter [1] statistics */
3001 if(dst->avctx->noise_reduction){
3002 for(i=0; i<64; i++){
3003 MERGE(dct_error_sum[0][i]);
3004 MERGE(dct_error_sum[1][i]);
/* both bitstreams must be byte-aligned before concatenation */
3008 assert(put_bits_count(&src->pb) % 8 ==0);
3009 assert(put_bits_count(&dst->pb) % 8 ==0);
3010 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3011 flush_put_bits(&dst->pb);
/* Choose the quality (quantizer/lambda) for the current picture:
 *  - a pending s->next_lambda takes priority (consumed unless dry_run),
 *  - otherwise, unless qscale is fixed, ask the rate controller;
 * then, with adaptive quantization, let the codec-specific helpers smooth
 * the per-MB qscale table and take lambda from it.
 * Returns 0 on success; the failure path after the quality < 0 check
 * (original line 3023-3025) is elided in this excerpt — presumably
 * returns a negative error code; confirm against the full file. */
3014 static int estimate_qp(MpegEncContext *s, int dry_run){
3015 if (s->next_lambda){
3016 s->current_picture_ptr->f.quality =
3017 s->current_picture.f.quality = s->next_lambda;
/* a dry run (2-pass probe) must not consume the queued lambda */
3018 if(!dry_run) s->next_lambda= 0;
3019 } else if (!s->fixed_qscale) {
3020 s->current_picture_ptr->f.quality =
3021 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3022 if (s->current_picture.f.quality < 0)
3026 if(s->adaptive_quant){
3027 switch(s->codec_id){
3028 case AV_CODEC_ID_MPEG4:
3029 if (CONFIG_MPEG4_ENCODER)
3030 ff_clean_mpeg4_qscales(s);
3032 case AV_CODEC_ID_H263:
3033 case AV_CODEC_ID_H263P:
3034 case AV_CODEC_ID_FLV1:
3035 if (CONFIG_H263_ENCODER)
3036 ff_clean_h263_qscales(s);
3039 ff_init_qscale_tab(s);
/* with adaptive quant, lambda follows the (cleaned) per-MB table */
3042 s->lambda= s->lambda_table[0];
3045 s->lambda = s->current_picture.f.quality;
3050 /* must be called before writing the header */
/* Derive the temporal distances used for B-frame prediction:
 *   pp_time = distance between the two non-B pictures surrounding this one,
 *   pb_time = distance from the previous non-B picture to this B picture.
 * s->time is the current picture's pts scaled into time_base.num units. */
3051 static void set_frame_distances(MpegEncContext * s){
3052 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3053 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3055 if(s->pict_type==AV_PICTURE_TYPE_B){
3056 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
/* a B frame must lie strictly between its two references */
3057 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* non-B picture: advance the reference-to-reference distance */
3059 s->pp_time= s->time - s->last_non_b_time;
3060 s->last_non_b_time= s->time;
3061 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one complete picture:
 *  1. initialize timing / rounding state,
 *  2. estimate the quantizer,
 *  3. run motion estimation across all slice-thread contexts,
 *  4. pick f_code/b_code and clip over-long motion vectors,
 *  5. write the format-specific picture header,
 *  6. run encode_thread() on every slice context and merge the results.
 * Returns 0 on success, negative on error (error-return lines are elided
 * in this excerpt — original line numbering is non-contiguous). */
3065 static int encode_picture(MpegEncContext *s, int picture_number)
3069 int context_count = s->slice_context_count;
3071 s->picture_number = picture_number;
3073 /* Reset the average MB variance */
3074 s->me.mb_var_sum_temp =
3075 s->me.mc_mb_var_sum_temp = 0;
3077 /* we need to initialize some time vars before we can encode b-frames */
3078 // RAL: Condition added for MPEG1VIDEO
3079 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3080 set_frame_distances(s);
3081 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3082 ff_set_mpeg4_time(s);
3084 s->me.scene_change_score=0;
3086 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding mode: I frames reset it; P/S frames toggle it for codecs with
 * flipflop rounding so MC rounding error does not accumulate */
3088 if(s->pict_type==AV_PICTURE_TYPE_I){
3089 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3090 else s->no_rounding=0;
3091 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3092 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3093 s->no_rounding ^= 1;
/* pass 2 of two-pass encoding: dry-run qp estimate + stored fcodes */
3096 if(s->flags & CODEC_FLAG_PASS2){
3097 if (estimate_qp(s,1) < 0)
3099 ff_get_2pass_fcode(s);
3100 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3101 if(s->pict_type==AV_PICTURE_TYPE_B)
3102 s->lambda= s->last_lambda_for[s->pict_type];
3104 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3108 s->mb_intra=0; //for the rate distortion & bit compare functions
/* clone the main context into every extra slice-thread context */
3109 for(i=1; i<context_count; i++){
3110 ret = ff_update_duplicate_context(s->thread_context[i], s);
3118 /* Estimate motion for every MB */
3119 if(s->pict_type != AV_PICTURE_TYPE_I){
/* scale lambda by the ME penalty compensation (fixed-point, >>8) */
3120 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3121 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3122 if (s->pict_type != AV_PICTURE_TYPE_B) {
3123 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3124 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3128 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3129 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* I frame: every MB is intra */
3131 for(i=0; i<s->mb_stride*s->mb_height; i++)
3132 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3134 if(!s->fixed_qscale){
3135 /* finding spatial complexity for I-frame rate control */
3136 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3139 for(i=1; i<context_count; i++){
3140 merge_context_after_me(s, s->thread_context[i]);
3142 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3143 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* promote a P frame to I on a detected scene change */
3146 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3147 s->pict_type= AV_PICTURE_TYPE_I;
3148 for(i=0; i<s->mb_stride*s->mb_height; i++)
3149 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3150 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3151 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: pick the forward f_code and clip too-long MVs */
3155 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3156 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3158 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3160 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3161 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3162 s->f_code= FFMAX3(s->f_code, a, b);
3165 ff_fix_long_p_mvs(s);
3166 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3167 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3171 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3172 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: pick forward f_code and backward b_code, then clip MVs */
3177 if(s->pict_type==AV_PICTURE_TYPE_B){
3180 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3181 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3182 s->f_code = FFMAX(a, b);
3184 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3185 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3186 s->b_code = FFMAX(a, b);
3188 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3189 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3190 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3191 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3192 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3194 for(dir=0; dir<2; dir++){
3197 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3198 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3199 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3200 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* final (non-dry-run) quantizer estimate */
3208 if (estimate_qp(s, 0) < 0)
3211 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3212 s->qscale= 3; //reduce clipping problems
3214 if (s->out_format == FMT_MJPEG) {
3215 /* for mjpeg, we do include qscale in the matrix */
3217 int j= s->dsp.idct_permutation[i];
3219 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3221 s->y_dc_scale_table=
3222 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3223 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3224 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3225 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3229 //FIXME var duplication
3230 s->current_picture_ptr->f.key_frame =
3231 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3232 s->current_picture_ptr->f.pict_type =
3233 s->current_picture.f.pict_type = s->pict_type;
3235 if (s->current_picture.f.key_frame)
3236 s->picture_in_gop_number=0;
/* write the picture header for the active output format */
3238 s->last_bits= put_bits_count(&s->pb);
3239 switch(s->out_format) {
3241 if (CONFIG_MJPEG_ENCODER)
3242 ff_mjpeg_encode_picture_header(s);
3245 if (CONFIG_H261_ENCODER)
3246 ff_h261_encode_picture_header(s, picture_number);
3249 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3250 ff_wmv2_encode_picture_header(s, picture_number);
3251 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3252 ff_msmpeg4_encode_picture_header(s, picture_number);
3253 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3254 ff_mpeg4_encode_picture_header(s, picture_number);
3255 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3256 ff_rv10_encode_picture_header(s, picture_number);
3257 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3258 ff_rv20_encode_picture_header(s, picture_number);
3259 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3260 ff_flv_encode_picture_header(s, picture_number);
3261 else if (CONFIG_H263_ENCODER)
3262 ff_h263_encode_picture_header(s, picture_number);
3265 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3266 ff_mpeg1_encode_picture_header(s, picture_number);
3271 bits= put_bits_count(&s->pb);
3272 s->header_bits= bits - s->last_bits;
/* encode all slices in parallel, then merge stats + bitstreams */
3274 for(i=1; i<context_count; i++){
3275 update_duplicate_context_after_me(s->thread_context[i], s);
3277 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3278 for(i=1; i<context_count; i++){
3279 merge_context_after_encode(s, s->thread_context[i]);
/* Adaptive DCT-domain noise reduction (C reference): accumulate each
 * coefficient's magnitude into dct_error_sum[] and pull the coefficient
 * towards zero by the running dct_offset[], clamping at zero so the sign
 * never flips. Separate statistics are kept for intra/inter blocks.
 * NOTE(review): the branch conditions (original lines 3293-3295) are
 * elided here; 3296-3298 presumably handle level > 0 and 3300-3302 the
 * negative case — confirm against the full file. */
3285 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3286 const int intra= s->mb_intra;
3289 s->dct_count[intra]++;
3291 for(i=0; i<64; i++){
3292 int level= block[i];
/* positive coefficient: shrink towards zero, never below it */
3296 s->dct_error_sum[intra][i] += level;
3297 level -= s->dct_offset[intra][i];
3298 if(level<0) level=0;
/* negative coefficient: mirror of the above */
3300 s->dct_error_sum[intra][i] -= level;
3301 level += s->dct_offset[intra][i];
3302 if(level>0) level=0;
/* Rate-distortion-optimal ("trellis") quantization of one 8x8 block.
 * Forward-DCTs `block`, generates up to two candidate quantized levels per
 * scan position, then runs a dynamic program over scan positions (with a
 * survivor list of candidate run starts) minimizing
 *     distortion + lambda * bits
 * for the chosen level/run sequence. Writes the chosen levels back into
 * `block` (in permuted scan order) and returns the index of the last
 * non-zero coefficient, or -1 for an all-zero block; *overflow is set when
 * a level exceeded s->max_qcoeff.
 * NOTE(review): original line numbering is non-contiguous — several
 * declarations (score_tab, survivor, run_tab, level_tab, coeff, bias, qmat,
 * max, ...) and closing braces are elided in this excerpt. */
3309 static int dct_quantize_trellis_c(MpegEncContext *s,
3310 int16_t *block, int n,
3311 int qscale, int *overflow){
3313 const uint8_t *scantable= s->intra_scantable.scantable;
3314 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3316 unsigned int threshold1, threshold2;
3328 int coeff_count[64];
3329 int qmul, qadd, start_i, last_non_zero, i, dc;
3330 const int esc_length= s->ac_esc_length;
3332 uint8_t * last_length;
3333 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3335 s->dsp.fdct (block);
3337 if(s->dct_error_sum)
3338 s->denoise_dct(s, block);
3340 qadd= ((qscale-1)|1)*8;
3351 /* For AIC we skip quant/dequant of INTRADC */
3356 /* note: block[0] is assumed to be positive */
3357 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: intra matrix + intra AC VLC length tables */
3360 qmat = s->q_intra_matrix[qscale];
3361 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3362 bias= 1<<(QMAT_SHIFT-1);
3363 length = s->intra_ac_vlc_length;
3364 last_length= s->intra_ac_vlc_last_length;
/* inter path (elided else): inter matrix + inter VLC tables */
3368 qmat = s->q_inter_matrix[qscale];
3369 length = s->inter_ac_vlc_length;
3370 last_length= s->inter_ac_vlc_last_length;
3374 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3375 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that quantizes non-zero */
3377 for(i=63; i>=start_i; i--) {
3378 const int j = scantable[i];
3379 int level = block[j] * qmat[j];
3381 if(((unsigned)(level+threshold1))>threshold2){
/* candidate generation: for each position keep level and level-1 (towards
 * zero) as the two quantization candidates for the DP below */
3387 for(i=start_i; i<=last_non_zero; i++) {
3388 const int j = scantable[i];
3389 int level = block[j] * qmat[j];
3391 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3392 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3393 if(((unsigned)(level+threshold1))>threshold2){
3395 level= (bias + level)>>QMAT_SHIFT;
3397 coeff[1][i]= level-1;
3398 // coeff[2][k]= level-2;
3400 level= (bias - level)>>QMAT_SHIFT;
3401 coeff[0][i]= -level;
3402 coeff[1][i]= -level+1;
3403 // coeff[2][k]= -level+2;
3405 coeff_count[i]= FFMIN(level, 2);
3406 assert(coeff_count[i]);
/* below-threshold coefficient: single candidate of magnitude 1, keeping
 * the sign ((level>>31)|1 is +1 or -1) */
3409 coeff[0][i]= (level>>31)|1;
3414 *overflow= s->max_qcoeff < max; //overflow might have happened
3416 if(last_non_zero < start_i){
3417 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3418 return last_non_zero;
3421 score_tab[start_i]= 0;
3422 survivor[0]= start_i;
/* dynamic program over scan positions: extend every surviving run start
 * with every candidate level and keep the cheapest path */
3425 for(i=start_i; i<=last_non_zero; i++){
3426 int level_index, j, zero_distortion;
3427 int dct_coeff= FFABS(block[ scantable[i] ]);
3428 int best_score=256*256*256*120;
/* the ifast FDCT is scaled by the AAN factors; undo for distortion */
3430 if (s->dsp.fdct == ff_fdct_ifast)
3431 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3432 zero_distortion= dct_coeff*dct_coeff;
3434 for(level_index=0; level_index < coeff_count[i]; level_index++){
3436 int level= coeff[level_index][i];
3437 const int alevel= FFABS(level);
/* reconstruct (dequantize) the candidate to measure its distortion */
3442 if(s->out_format == FMT_H263){
3443 unquant_coeff= alevel*qmul + qadd;
3445 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3447 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3448 unquant_coeff = (unquant_coeff - 1) | 1;
3450 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3451 unquant_coeff = (unquant_coeff - 1) | 1;
3456 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* small level: codable with a regular (run,level) VLC */
3458 if((level&(~127)) == 0){
3459 for(j=survivor_count-1; j>=0; j--){
3460 int run= i - survivor[j];
3461 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3462 score += score_tab[i-run];
3464 if(score < best_score){
3467 level_tab[i+1]= level-64;
/* H.263 needs the "last" VLC table for a potential final coefficient */
3471 if(s->out_format == FMT_H263){
3472 for(j=survivor_count-1; j>=0; j--){
3473 int run= i - survivor[j];
3474 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3475 score += score_tab[i-run];
3476 if(score < last_score){
3479 last_level= level-64;
/* large level: only codable via the escape code */
3485 distortion += esc_length*lambda;
3486 for(j=survivor_count-1; j>=0; j--){
3487 int run= i - survivor[j];
3488 int score= distortion + score_tab[i-run];
3490 if(score < best_score){
3493 level_tab[i+1]= level-64;
3497 if(s->out_format == FMT_H263){
3498 for(j=survivor_count-1; j>=0; j--){
3499 int run= i - survivor[j];
3500 int score= distortion + score_tab[i-run];
3501 if(score < last_score){
3504 last_level= level-64;
3512 score_tab[i+1]= best_score;
/* prune the survivor list; the slack accounts for VLC irregularities */
3514 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3515 if(last_non_zero <= 27){
3516 for(; survivor_count; survivor_count--){
3517 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3521 for(; survivor_count; survivor_count--){
3522 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3527 survivor[ survivor_count++ ]= i+1;
/* non-H.263 formats have no "last" VLC: pick the cheapest end position */
3530 if(s->out_format != FMT_H263){
3531 last_score= 256*256*256*120;
3532 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3533 int score= score_tab[i];
3534 if(i) score += lambda*2; //FIXME exacter?
3536 if(score < last_score){
3539 last_level= level_tab[i];
3540 last_run= run_tab[i];
3545 s->coded_score[n] = last_score;
3547 dc= FFABS(block[0]);
3548 last_non_zero= last_i - 1;
3549 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3551 if(last_non_zero < start_i)
3552 return last_non_zero;
/* special case: only the first coefficient survived — re-decide it
 * against the plain DC distortion */
3554 if(last_non_zero == 0 && start_i == 0){
3556 int best_score= dc * dc;
3558 for(i=0; i<coeff_count[0]; i++){
3559 int level= coeff[i][0];
3560 int alevel= FFABS(level);
3561 int unquant_coeff, score, distortion;
3563 if(s->out_format == FMT_H263){
3564 unquant_coeff= (alevel*qmul + qadd)>>3;
3566 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3567 unquant_coeff = (unquant_coeff - 1) | 1;
3569 unquant_coeff = (unquant_coeff + 4) >> 3;
3570 unquant_coeff<<= 3 + 3;
3572 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3574 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3575 else score= distortion + esc_length*lambda;
3577 if(score < best_score){
3579 best_level= level - 64;
3582 block[0]= best_level;
3583 s->coded_score[n] = best_score - dc*dc;
3584 if(best_level == 0) return -1;
3585 else return last_non_zero;
/* backtrack the DP path and write the chosen levels into the block */
3591 block[ perm_scantable[last_non_zero] ]= last_level;
3594 for(; i>start_i; i -= run_tab[i] + 1){
3595 block[ perm_scantable[i-1] ]= level_tab[i];
3598 return last_non_zero;
3601 //#define REFINE_STATS 1
/* 64x64 table of 8x8 2D-DCT basis vectors, indexed by (permuted)
 * coefficient; filled lazily by build_basis() and used by
 * dct_quantize_refine(). */
3602 static int16_t basis[64][64];
/* Precompute the fixed-point (BASIS_SHIFT-scaled) DCT basis vectors,
 * stored under the IDCT permutation `perm`.
 * NOTE(review): the i/j/x/y loop headers (original lines 3605-3610) are
 * elided in this excerpt; `s` is the per-basis normalization factor. */
3604 static void build_basis(uint8_t *perm){
3611 double s= 0.25*(1<<BASIS_SHIFT);
3613 int perm_index= perm[index];
/* DC row/column carries the extra 1/sqrt(2) DCT normalization */
3614 if(i==0) s*= sqrt(0.5);
3615 if(j==0) s*= sqrt(0.5);
3616 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Quantizer-noise-shaping refinement of an already-quantized block.
 * Starting from the quantized 'block', it iteratively tries +/-1 changes to
 * individual coefficients and keeps any change that lowers a combined
 * rate (VLC bit cost) + distortion (weighted basis-domain error) score.
 * 'weight' is the per-coefficient noise-shaping weight, 'orig' the source
 * pixels/coefficients the reconstruction error is measured against.
 * Returns the (possibly updated) index of the last nonzero coefficient.
 * NOTE(review): several intermediate lines are elided in this excerpt;
 * comments below describe only what the visible code establishes. */
3623 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3624 int16_t *block, int16_t *weight, int16_t *orig,
3627 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3628 const uint8_t *scantable= s->intra_scantable.scantable;
3629 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3630 // unsigned int threshold1, threshold2;
3635 int qmul, qadd, start_i, last_non_zero, i, dc;
3637 uint8_t * last_length;
3639 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters (static => not thread-safe; debug only) */
3642 static int after_last=0;
3643 static int to_zero=0;
3644 static int from_zero=0;
3647 static int messed_sign=0;
/* build the DCT basis table on first use */
3650 if(basis[0][0] == 0)
3651 build_basis(s->dsp.idct_permutation);
3662 /* For AIC we skip quant/dequant of INTRADC */
3666 q <<= RECON_SHIFT-3;
3667 /* note: block[0] is assumed to be positive */
3669 // block[0] = (block[0] + (q >> 1)) / q;
3671 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3672 // bias= 1<<(QMAT_SHIFT-1);
/* pick the AC VLC length tables: intra vs inter (selection condition elided) */
3673 length = s->intra_ac_vlc_length;
3674 last_length= s->intra_ac_vlc_last_length;
3678 length = s->inter_ac_vlc_length;
3679 last_length= s->inter_ac_vlc_last_length;
3681 last_non_zero = s->block_last_index[n];
/* rem[] accumulates the reconstruction error in RECON_SHIFT fixed point,
 * starting from the DC term minus the original signal */
3686 dc += (1<<(RECON_SHIFT-1));
3687 for(i=0; i<64; i++){
3688 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3691 STOP_TIMER("memset rem[]")}
/* derive the per-coefficient noise-shaping weights from 'weight' and the
 * qns strength; result is clamped into roughly 16..63 */
3694 for(i=0; i<64; i++){
3699 w= FFABS(weight[i]) + qns*one;
3700 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3703 // w=weight[i] = (63*qns + (w/2)) / w;
3709 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* add the dequantized AC coefficients back into rem[] and record the RLE
 * run lengths so bit costs can be evaluated incrementally later */
3715 for(i=start_i; i<=last_non_zero; i++){
3716 int j= perm_scantable[i];
3717 const int level= block[j];
3721 if(level<0) coeff= qmul*level - qadd;
3722 else coeff= qmul*level + qadd;
3723 run_tab[rle_index++]=run;
3726 s->dsp.add_8x8basis(rem, basis[j], coeff);
3732 if(last_non_zero>0){
3733 STOP_TIMER("init rem[]")
/* ---- iterative search: repeatedly find the single best +/-1 change ---- */
3740 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3743 int run2, best_unquant_change=0, analyze_gradient;
3747 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* optional gradient analysis: d1[] holds the weighted error gradient used
 * below to reject sign-inconsistent new coefficients early */
3749 if(analyze_gradient){
3753 for(i=0; i<64; i++){
3756 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3759 STOP_TIMER("rem*w*w")}
/* DC coefficient: intra only; try level +/- 1 with distortion-only score
 * (DC has no VLC rate term here) */
3769 const int level= block[0];
3770 int change, old_coeff;
3772 assert(s->mb_intra);
3776 for(change=-1; change<=1; change+=2){
3777 int new_level= level + change;
3778 int score, new_coeff;
3780 new_coeff= q*new_level;
3781 if(new_coeff >= 2048 || new_coeff < 0)
3784 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3785 if(score<best_score){
3788 best_change= change;
3789 best_unquant_change= new_coeff - old_coeff;
3796 run2= run_tab[rle_index++];
/* AC coefficients: for each position try level +/- 1, scoring the VLC bit
 * delta (including run merges/splits when a coefficient becomes zero or
 * nonzero) plus the weighted distortion delta */
3800 for(i=start_i; i<64; i++){
3801 int j= perm_scantable[i];
3802 const int level= block[j];
3803 int change, old_coeff;
3805 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3809 if(level<0) old_coeff= qmul*level - qadd;
3810 else old_coeff= qmul*level + qadd;
3811 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3815 assert(run2>=0 || i >= last_non_zero );
3818 for(change=-1; change<=1; change+=2){
3819 int new_level= level + change;
3820 int score, new_coeff, unquant_change;
/* at low noise-shaping strength never grow a coefficient's magnitude */
3823 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3827 if(new_level<0) new_coeff= qmul*new_level - qadd;
3828 else new_coeff= qmul*new_level + qadd;
3829 if(new_coeff >= 2048 || new_coeff <= -2048)
3831 //FIXME check for overflow
/* case 1: coefficient stays nonzero — plain VLC length difference */
3834 if(level < 63 && level > -63){
3835 if(i < last_non_zero)
3836 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3837 - length[UNI_AC_ENC_INDEX(run, level+64)];
3839 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3840 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* case 2: coefficient goes 0 -> +/-1 — splits the surrounding run */
3843 assert(FFABS(new_level)==1);
3845 if(analyze_gradient){
3846 int g= d1[ scantable[i] ];
/* reject if the error gradient points the same way as the new level */
3847 if(g && (g^new_level) >= 0)
3851 if(i < last_non_zero){
3852 int next_i= i + run2 + 1;
3853 int next_level= block[ perm_scantable[next_i] ] + 64;
3855 if(next_level&(~127))
3858 if(next_i < last_non_zero)
3859 score += length[UNI_AC_ENC_INDEX(run, 65)]
3860 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3861 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3863 score += length[UNI_AC_ENC_INDEX(run, 65)]
3864 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3865 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3867 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3869 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3870 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* case 3: coefficient goes +/-1 -> 0 — merges the surrounding runs */
3876 assert(FFABS(level)==1);
3878 if(i < last_non_zero){
3879 int next_i= i + run2 + 1;
3880 int next_level= block[ perm_scantable[next_i] ] + 64;
3882 if(next_level&(~127))
3885 if(next_i < last_non_zero)
3886 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3887 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3888 - length[UNI_AC_ENC_INDEX(run, 65)];
3890 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3891 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3892 - length[UNI_AC_ENC_INDEX(run, 65)];
3894 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3896 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3897 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* bits were scored above; now add the weighted distortion delta */
3904 unquant_change= new_coeff - old_coeff;
3905 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3907 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3908 if(score<best_score){
3911 best_change= change;
3912 best_unquant_change= unquant_change;
3916 prev_level= level + 64;
3917 if(prev_level&(~127))
3926 STOP_TIMER("iterative step")}
/* apply the best change found in this pass (loop continues until no
 * improving change remains — termination condition elided here) */
3930 int j= perm_scantable[ best_coeff ];
3932 block[j] += best_change;
3934 if(best_coeff > last_non_zero){
3935 last_non_zero= best_coeff;
3943 if(block[j] - best_change){
3944 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* if the last coefficient was zeroed, shrink last_non_zero accordingly */
3956 for(; last_non_zero>=start_i; last_non_zero--){
3957 if(block[perm_scantable[last_non_zero]])
/* periodic REFINE_STATS dump (debug builds only) */
3963 if(256*256*256*64 % count == 0){
3964 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* rebuild run_tab[] and rem[] to reflect the accepted change */
3969 for(i=start_i; i<=last_non_zero; i++){
3970 int j= perm_scantable[i];
3971 const int level= block[j];
3974 run_tab[rle_index++]=run;
3981 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3987 if(last_non_zero>0){
3988 STOP_TIMER("iterative search")
3993 return last_non_zero;
/* Default C implementation of DCT + quantization for one 8x8 block.
 * Runs the forward DCT, optionally applies DCT-domain denoising, quantizes
 * the coefficients with the intra or inter matrix for 'qscale', sets
 * *overflow when clipping may have occurred, and returns the scan-order
 * index of the last nonzero coefficient (the block_last_index).
 * NOTE(review): intermediate lines are elided in this excerpt. */
3996 int ff_dct_quantize_c(MpegEncContext *s,
3997 int16_t *block, int n,
3998 int qscale, int *overflow)
4000 int i, j, level, last_non_zero, q, start_i;
4002 const uint8_t *scantable= s->intra_scantable.scantable;
4005 unsigned int threshold1, threshold2;
4007 s->dsp.fdct (block);
/* DCT-domain noise reduction, only when error statistics are being kept */
4009 if(s->dct_error_sum)
4010 s->denoise_dct(s, block);
4020 /* For AIC we skip quant/dequant of INTRADC */
4023 /* note: block[0] is assumed to be positive */
/* intra DC is quantized separately with divisor q (rounded) */
4024 block[0] = (block[0] + (q >> 1)) / q;
/* select quant matrix and rounding bias: intra vs inter path */
4027 qmat = s->q_intra_matrix[qscale];
4028 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4032 qmat = s->q_inter_matrix[qscale];
4033 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* coefficients with |level*qmat| inside (threshold1, threshold2) quantize
 * to zero; scan backwards to locate the last coefficient that survives */
4035 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4036 threshold2= (threshold1<<1);
4037 for(i=63;i>=start_i;i--) {
4039 level = block[j] * qmat[j];
4041 if(((unsigned)(level+threshold1))>threshold2){
/* quantize every coefficient up to last_non_zero; the unsigned-range trick
 * tests |level| > threshold1 with a single compare */
4048 for(i=start_i; i<=last_non_zero; i++) {
4050 level = block[j] * qmat[j];
4052 // if( bias+level >= (1<<QMAT_SHIFT)
4053 // || bias-level >= (1<<QMAT_SHIFT)){
4054 if(((unsigned)(level+threshold1))>threshold2){
4056 level= (bias + level)>>QMAT_SHIFT;
4059 level= (bias - level)>>QMAT_SHIFT;
4067 *overflow= s->max_qcoeff < max; //overflow might have happened
4069 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4070 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4071 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4073 return last_non_zero;
/* Helpers for the AVOption tables below: OFFSET maps an option to its
 * field in MpegEncContext; VE marks options as video + encoding params. */
4076 #define OFFSET(x) offsetof(MpegEncContext, x)
4077 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder (terminator elided in excerpt). */
4078 static const AVOption h263_options[] = {
4079 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4080 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4081 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOptions API. */
4086 static const AVClass h263_class = {
4087 .class_name = "H.263 encoder",
4088 .item_name = av_default_item_name,
4089 .option = h263_options,
4090 .version = LIBAVUTIL_VERSION_INT,
/* H.263 (H.263-1996) encoder registration; shares the generic MPEG-video
 * encode init/picture/end entry points. YUV420P input only. */
4093 AVCodec ff_h263_encoder = {
4095 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4096 .type = AVMEDIA_TYPE_VIDEO,
4097 .id = AV_CODEC_ID_H263,
4098 .priv_data_size = sizeof(MpegEncContext),
4099 .init = ff_MPV_encode_init,
4100 .encode2 = ff_MPV_encode_picture,
4101 .close = ff_MPV_encode_end,
4102 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4103 .priv_class = &h263_class,
/* Private options of the H.263+ encoder (terminator elided in excerpt). */
4106 static const AVOption h263p_options[] = {
4107 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4108 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4109 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4110 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOptions API. */
4114 static const AVClass h263p_class = {
4115 .class_name = "H.263p encoder",
4116 .item_name = av_default_item_name,
4117 .option = h263p_options,
4118 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263-1998 / H.263 version 2) encoder registration. Unlike the
 * plain H.263 encoder it advertises slice-threading capability. */
4121 AVCodec ff_h263p_encoder = {
4123 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4124 .type = AVMEDIA_TYPE_VIDEO,
4125 .id = AV_CODEC_ID_H263P,
4126 .priv_data_size = sizeof(MpegEncContext),
4127 .init = ff_MPV_encode_init,
4128 .encode2 = ff_MPV_encode_picture,
4129 .close = ff_MPV_encode_end,
4130 .capabilities = CODEC_CAP_SLICE_THREADS,
4131 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4132 .priv_class = &h263p_class,
/* Generic option class (msmpeg4v2_class) plus encoder registration for
 * MS-MPEG4 v2; uses the shared MPEG-video encode entry points. */
4135 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4137 AVCodec ff_msmpeg4v2_encoder = {
4138 .name = "msmpeg4v2",
4139 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4140 .type = AVMEDIA_TYPE_VIDEO,
4141 .id = AV_CODEC_ID_MSMPEG4V2,
4142 .priv_data_size = sizeof(MpegEncContext),
4143 .init = ff_MPV_encode_init,
4144 .encode2 = ff_MPV_encode_picture,
4145 .close = ff_MPV_encode_end,
4146 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4147 .priv_class = &msmpeg4v2_class,
/* Generic option class (msmpeg4v3_class) plus encoder registration for
 * MS-MPEG4 v3; uses the shared MPEG-video encode entry points. */
4150 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4152 AVCodec ff_msmpeg4v3_encoder = {
4154 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4155 .type = AVMEDIA_TYPE_VIDEO,
4156 .id = AV_CODEC_ID_MSMPEG4V3,
4157 .priv_data_size = sizeof(MpegEncContext),
4158 .init = ff_MPV_encode_init,
4159 .encode2 = ff_MPV_encode_picture,
4160 .close = ff_MPV_encode_end,
4161 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4162 .priv_class = &msmpeg4v3_class,
/* Generic option class (wmv1_class) for the WMV7 encoder below. */
4165 FF_MPV_GENERIC_CLASS(wmv1)
4167 AVCodec ff_wmv1_encoder = {
4169 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4170 .type = AVMEDIA_TYPE_VIDEO,
4171 .id = AV_CODEC_ID_WMV1,
4172 .priv_data_size = sizeof(MpegEncContext),
4173 .init = ff_MPV_encode_init,
4174 .encode2 = ff_MPV_encode_picture,
4175 .close = ff_MPV_encode_end,
4176 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4177 .priv_class = &wmv1_class,