2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
41 #include "mpegvideo.h"
49 #include "aandcttab.h"
51 #include "mpeg4video.h"
53 #include "bytestream.h"
/* Forward declarations for encoder-internal helpers defined later in this file. */
56 static int encode_picture(MpegEncContext *s, int picture_number);
/* Trellis refinement pass over an already-quantized block (RD optimization). */
57 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
/* Sum of squared errors for the current macroblock (used for PSNR/error stats). */
58 static int sse_mb(MpegEncContext *s);
59 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
60 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default motion-estimation tables, filled in by MPV_encode_defaults();
 * indexed by f_code and by MV component offset by MAX_MV. */
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
65 const AVOption ff_mpv_generic_options[] = {
/* Convert a quantization matrix into the per-qscale reciprocal tables the
 * quantizers use (division replaced by multiply+shift). The exact scaling
 * depends on which fdct implementation is selected, since ifast bakes the
 * AAN post-scale factors into its output. */
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71 uint16_t (*qmat16)[2][64],
72 const uint16_t *quant_matrix,
73 int bias, int qmin, int qmax, int intra)
/* Build one reciprocal table per qscale in the configured [qmin, qmax] range. */
78 for (qscale = qmin; qscale <= qmax; qscale++) {
/* jpeg islow / faandct output is unscaled, so only qscale*matrix divides. */
80 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81 dsp->fdct == ff_jpeg_fdct_islow_10 ||
82 dsp->fdct == ff_faandct) {
83 for (i = 0; i < 64; i++) {
84 const int j = dsp->idct_permutation[i];
85 /* 16 <= qscale * quant_matrix[i] <= 7905
86 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
87 * 19952 <= x <= 249205026
88 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
89 * 3444240 >= (1 << 36) / (x) >= 275 */
91 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92 (qscale * quant_matrix[j]));
/* ifast output carries the AAN scale factors, so fold ff_aanscales in. */
94 } else if (dsp->fdct == ff_fdct_ifast) {
95 for (i = 0; i < 64; i++) {
96 const int j = dsp->idct_permutation[i];
97 /* 16 <= qscale * quant_matrix[i] <= 7905
98 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99 * 19952 <= x <= 249205026
100 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101 * 3444240 >= (1 << 36) / (x) >= 275 */
103 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104 (ff_aanscales[i] * qscale *
/* Generic fdct path: also fill the 16-bit tables used by SIMD quantizers. */
108 for (i = 0; i < 64; i++) {
109 const int j = dsp->idct_permutation[i];
110 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
111 * Assume x = qscale * quant_matrix[i]
113 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
114 * so 32768 >= (1 << 19) / (x) >= 67 */
115 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
116 (qscale * quant_matrix[j]));
117 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
118 // (qscale * quant_matrix[i]);
119 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
120 (qscale * quant_matrix[j]);
/* Clamp degenerate reciprocals so the SIMD quantizer never multiplies by
 * 0 or by exactly 2^15 (which would overflow the 16-bit path). */
122 if (qmat16[qscale][0][i] == 0 ||
123 qmat16[qscale][0][i] == 128 * 256)
124 qmat16[qscale][0][i] = 128 * 256 - 1;
125 qmat16[qscale][1][i] =
126 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
127 qmat16[qscale][0][i]);
/* Overflow check: shrink the effective shift while max*qmat could exceed
 * INT_MAX; intra skips coefficient 0 (DC handled separately). */
131 for (i = intra; i < 64; i++) {
133 if (dsp->fdct == ff_fdct_ifast) {
134 max = (8191LL * ff_aanscales[i]) >> 14;
136 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
142 av_log(NULL, AV_LOG_INFO,
143 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the integer qscale (and squared lambda) from the current lambda.
 * The 139/2^7 factor approximates the empirical lambda->qp mapping; the
 * result is clamped to the user-configured [qmin, qmax] range. */
148 static inline void update_qscale(MpegEncContext *s)
150     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
151                 (FF_LAMBDA_SHIFT + 7);
152     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
/* Keep lambda2 (= lambda^2 in fixed point, rounded) in sync for RD decisions. */
154     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag scan order as required by the MPEG syntax. */
158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
164     for (i = 0; i < 64; i++) {
165         put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
172  * init s->current_picture.qscale_table from s->lambda_table
174 void ff_init_qscale_tab(MpegEncContext *s)
176     int8_t * const qscale_table = s->current_picture.qscale_table;
/* Map each macroblock's lambda to a qp (same 139/2^7 mapping as
 * update_qscale()) and clamp it to the configured qp range. */
179     for (i = 0; i < s->mb_num; i++) {
180         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
181         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
182         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy the fields that motion estimation may have changed from the slice
 * thread context (src) back into dst, so the contexts stay consistent. */
187 static void update_duplicate_context_after_me(MpegEncContext *dst,
190 #define COPY(a) dst->a= src->a
192     COPY(current_picture);
198     COPY(picture_in_gop_number);
199     COPY(gop_picture_number);
200     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
201     COPY(progressive_frame);    // FIXME don't set in encode_header
202     COPY(partitioned_frame);    // FIXME don't set in encode_header
207  * Set the given MpegEncContext to defaults for encoding.
208  * the changed fields will not depend upon the prior state of the MpegEncContext.
210 static void MPV_encode_defaults(MpegEncContext *s)
213     ff_MPV_common_defaults(s);
/* Initialize the shared static fcode table; small MVs get fcode 1.
 * NOTE(review): writing a file-static table from a per-context init is only
 * safe because every writer stores the same values. */
215     for (i = -16; i < 16; i++) {
216         default_fcode_tab[i + MAX_MV] = 1;
218     s->me.mv_penalty = default_mv_penalty;
219     s->fcode_tab     = default_fcode_tab;
222 /* init video encoder */
223 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
225     MpegEncContext *s = avctx->priv_data;
227     int chroma_h_shift, chroma_v_shift;
229     MPV_encode_defaults(s);
/* Per-codec pixel-format validation: each codec family only accepts a
 * subset of YUV layouts (jpeg variants additionally allow the YUVJ range
 * and, for LJPEG, BGRA). */
231     switch (avctx->codec_id) {
232     case AV_CODEC_ID_MPEG2VIDEO:
233         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
234             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
235             av_log(avctx, AV_LOG_ERROR,
236                    "only YUV420 and YUV422 are supported\n");
240     case AV_CODEC_ID_LJPEG:
241         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
242             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
243             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
244             avctx->pix_fmt != AV_PIX_FMT_BGRA &&
245             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
246               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
247               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
248              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
249             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
253     case AV_CODEC_ID_MJPEG:
254         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
255             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
256             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
257               avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
258              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
259             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
264         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
265             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* Record the chroma format used by header writers / rate control. */
270     switch (avctx->pix_fmt) {
271     case AV_PIX_FMT_YUVJ422P:
272     case AV_PIX_FMT_YUV422P:
273         s->chroma_format = CHROMA_422;
275     case AV_PIX_FMT_YUVJ420P:
276     case AV_PIX_FMT_YUV420P:
278         s->chroma_format = CHROMA_420;
/* Mirror commonly-used AVCodecContext options into the MpegEncContext. */
282     s->bit_rate = avctx->bit_rate;
283     s->width    = avctx->width;
284     s->height   = avctx->height;
/* NOTE(review): this is logged as a "Warning" but at AV_LOG_ERROR level;
 * the gop size is silently capped at 600 — confirm intended severity. */
285     if (avctx->gop_size > 600 &&
286         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
287         av_log(avctx, AV_LOG_ERROR,
288                "Warning keyframe interval too large! reducing it ...\n");
289         avctx->gop_size = 600;
291     s->gop_size     = avctx->gop_size;
293     s->flags        = avctx->flags;
294     s->flags2       = avctx->flags2;
295     if (avctx->max_b_frames > MAX_B_FRAMES) {
296         av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
297                "is %d.\n", MAX_B_FRAMES);
299     s->max_b_frames = avctx->max_b_frames;
300     s->codec_id     = avctx->codec->id;
301     s->strict_std_compliance = avctx->strict_std_compliance;
302     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
303     s->mpeg_quant         = avctx->mpeg_quant;
304     s->rtp_mode           = !!avctx->rtp_payload_size;
305     s->intra_dc_precision = avctx->intra_dc_precision;
306     s->user_specified_pts = AV_NOPTS_VALUE;
308     if (s->gop_size <= 1) {
315     s->me_method = avctx->me_method;
318     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled when any masking option (or QP RD) asks
 * for per-MB qp variation. */
320     s->adaptive_quant = (s->avctx->lumi_masking ||
321                          s->avctx->dark_masking ||
322                          s->avctx->temporal_cplx_masking ||
323                          s->avctx->spatial_cplx_masking  ||
324                          s->avctx->p_masking      ||
325                          s->avctx->border_masking ||
326                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
329     s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* Rate-control parameter sanity checks: inconsistent min/max/vbv settings
 * are rejected (errors) or flagged (info warnings) before init proceeds. */
331     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
332         av_log(avctx, AV_LOG_ERROR,
333                "a vbv buffer size is needed, "
334                "for encoding with a maximum bitrate\n");
338     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
339         av_log(avctx, AV_LOG_INFO,
340                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
343     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
344         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
348     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
349         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
353     if (avctx->rc_max_rate &&
354         avctx->rc_max_rate == avctx->bit_rate &&
355         avctx->rc_max_rate != avctx->rc_min_rate) {
356         av_log(avctx, AV_LOG_INFO,
357                "impossible bitrate constraints, this will fail\n");
/* The VBV buffer must hold at least one frame's worth of bits. */
360     if (avctx->rc_buffer_size &&
361         avctx->bit_rate * (int64_t)avctx->time_base.num >
362             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
363         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
367     if (!s->fixed_qscale &&
368         avctx->bit_rate * av_q2d(avctx->time_base) >
369             avctx->bit_rate_tolerance) {
370         av_log(avctx, AV_LOG_ERROR,
371                "bitrate tolerance too small for bitrate\n");
/* CBR MPEG-1/2: a vbv buffer larger than the 16-bit vbv_delay field can
 * express degrades to VBR signalling (vbv_delay = 0xFFFF). */
375     if (s->avctx->rc_max_rate &&
376         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
377         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
378          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
379         90000LL * (avctx->rc_buffer_size - 1) >
380             s->avctx->rc_max_rate * 0xFFFFLL) {
381         av_log(avctx, AV_LOG_INFO,
382                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
383                "specified vbv buffer is too large for the given bitrate!\n");
/* Feature/codec compatibility checks: reject option combinations the
 * selected codec cannot encode. */
386     if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
387         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
388         s->codec_id != AV_CODEC_ID_FLV1) {
389         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
393     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
394         av_log(avctx, AV_LOG_ERROR,
395                "OBMC is only supported with simple mb decision\n");
399     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
400         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
404     if (s->max_b_frames &&
405         s->codec_id != AV_CODEC_ID_MPEG4 &&
406         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
407         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
408         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
412     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
413          s->codec_id == AV_CODEC_ID_H263  ||
414          s->codec_id == AV_CODEC_ID_H263P) &&
415         (avctx->sample_aspect_ratio.num > 255 ||
416          avctx->sample_aspect_ratio.den > 255)) {
417         av_log(avctx, AV_LOG_ERROR,
418                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
419                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
423     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
424         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
425         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
429     // FIXME mpeg2 uses that too
430     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
431         av_log(avctx, AV_LOG_ERROR,
432                "mpeg2 style quantization not supported by codec\n");
436     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
437         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
441     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
442         s->avctx->mb_decision != FF_MB_DECISION_RD) {
443         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
447     if (s->avctx->scenechange_threshold < 1000000000 &&
448         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
449         av_log(avctx, AV_LOG_ERROR,
450                "closed gop with scene change detection are not supported yet, "
451                "set threshold to 1000000000\n");
455     if (s->flags & CODEC_FLAG_LOW_DELAY) {
456         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
457             av_log(avctx, AV_LOG_ERROR,
458                    "low delay forcing is only available for mpeg2\n");
461         if (s->max_b_frames != 0) {
462             av_log(avctx, AV_LOG_ERROR,
463                    "b frames cannot be used with low delay\n");
468     if (s->q_scale_type == 1) {
469         if (avctx->qmax > 12) {
470             av_log(avctx, AV_LOG_ERROR,
471                    "non linear quant only supports qmax <= 12 currently\n");
/* Slice-threaded encoding is only implemented for a few codecs. */
476     if (s->avctx->thread_count > 1 &&
477         s->codec_id != AV_CODEC_ID_MPEG4 &&
478         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
479         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
480         (s->codec_id != AV_CODEC_ID_H263P)) {
481         av_log(avctx, AV_LOG_ERROR,
482                "multi threaded encoding not supported by codec\n");
486     if (s->avctx->thread_count < 1) {
487         av_log(avctx, AV_LOG_ERROR,
488                "automatic thread number detection not supported by codec,"
493     if (s->avctx->thread_count > 1)
496     if (!avctx->time_base.den || !avctx->time_base.num) {
497         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
501     i = (INT_MAX / 2 + 128) >> 8;
502     if (avctx->mb_threshold >= i) {
503         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
508     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
509         av_log(avctx, AV_LOG_INFO,
510                "notice: b_frame_strategy only affects the first pass\n");
511         avctx->b_frame_strategy = 0;
/* Reduce the timebase to lowest terms; downstream header fields have
 * limited range (e.g. the MPEG-4 16-bit check below). */
514     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
516         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
517         avctx->time_base.den /= i;
518         avctx->time_base.num /= i;
/* Default quantization biases: MPEG-style codecs round intra up by 3/8,
 * H.263-style codecs bias inter coefficients down. User-set biases
 * override these defaults below. */
522     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
523         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
524         // (a + x * 3 / 8) / x
525         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
526         s->inter_quant_bias = 0;
528         s->intra_quant_bias = 0;
530         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
533     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
534         s->intra_quant_bias = avctx->intra_quant_bias;
535     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
536         s->inter_quant_bias = avctx->inter_quant_bias;
538     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
541     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
542         s->avctx->time_base.den > (1 << 16) - 1) {
543         av_log(avctx, AV_LOG_ERROR,
544                "timebase %d/%d not supported by MPEG 4 standard, "
545                "the maximum admitted value for the timebase denominator "
546                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
550     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* Per-codec setup: output format, delay, codec-specific flags/tables. */
552     switch (avctx->codec->id) {
553     case AV_CODEC_ID_MPEG1VIDEO:
554         s->out_format = FMT_MPEG1;
555         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
556         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
558     case AV_CODEC_ID_MPEG2VIDEO:
559         s->out_format = FMT_MPEG1;
560         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
561         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
564     case AV_CODEC_ID_LJPEG:
565     case AV_CODEC_ID_MJPEG:
566         s->out_format = FMT_MJPEG;
567         s->intra_only = 1; /* force intra only for jpeg */
568         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
569             avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
570             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
571             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
572             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
574             s->mjpeg_vsample[0] = 2;
575             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
576             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
577             s->mjpeg_hsample[0] = 2;
578             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
579             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
581         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
582             ff_mjpeg_encode_init(s) < 0)
587     case AV_CODEC_ID_H261:
588         if (!CONFIG_H261_ENCODER)
590         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
591             av_log(avctx, AV_LOG_ERROR,
592                    "The specified picture size of %dx%d is not valid for the "
593                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
594                    s->width, s->height);
597         s->out_format = FMT_H261;
601     case AV_CODEC_ID_H263:
602         if (!CONFIG_H263_ENCODER)
604         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
605                              s->width, s->height) == 8) {
606             av_log(avctx, AV_LOG_INFO,
607                    "The specified picture size of %dx%d is not valid for "
608                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
609                    "352x288, 704x576, and 1408x1152."
610                    "Try H.263+.\n", s->width, s->height);
613         s->out_format = FMT_H263;
617     case AV_CODEC_ID_H263P:
618         s->out_format = FMT_H263;
621         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
622         s->modified_quant  = s->h263_aic;
623         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
624         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
627         /* These are just to be sure */
631     case AV_CODEC_ID_FLV1:
632         s->out_format      = FMT_H263;
633         s->h263_flv        = 2; /* format = 1; 11-bit codes */
634         s->unrestricted_mv = 1;
635         s->rtp_mode        = 0; /* don't allow GOB */
639     case AV_CODEC_ID_RV10:
640         s->out_format = FMT_H263;
644     case AV_CODEC_ID_RV20:
645         s->out_format      = FMT_H263;
648         s->modified_quant  = 1;
652         s->unrestricted_mv = 0;
654     case AV_CODEC_ID_MPEG4:
655         s->out_format      = FMT_H263;
657         s->unrestricted_mv = 1;
658         s->low_delay       = s->max_b_frames ? 0 : 1;
659         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
661     case AV_CODEC_ID_MSMPEG4V2:
662         s->out_format      = FMT_H263;
664         s->unrestricted_mv = 1;
665         s->msmpeg4_version = 2;
669     case AV_CODEC_ID_MSMPEG4V3:
670         s->out_format        = FMT_H263;
672         s->unrestricted_mv   = 1;
673         s->msmpeg4_version   = 3;
674         s->flipflop_rounding = 1;
678     case AV_CODEC_ID_WMV1:
679         s->out_format        = FMT_H263;
681         s->unrestricted_mv   = 1;
682         s->msmpeg4_version   = 4;
683         s->flipflop_rounding = 1;
687     case AV_CODEC_ID_WMV2:
688         s->out_format        = FMT_H263;
690         s->unrestricted_mv   = 1;
691         s->msmpeg4_version   = 5;
692         s->flipflop_rounding = 1;
700     avctx->has_b_frames = !s->low_delay;
704     s->progressive_frame    =
705     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
706                                                 CODEC_FLAG_INTERLACED_ME) ||
/* Allocate the common (decoder-shared) context state. */
710     if (ff_MPV_common_init(s) < 0)
714         ff_MPV_encode_init_x86(s);
716     ff_h263dsp_init(&s->h263dsp);
/* Select quantizer implementations; trellis quant replaces the default
 * when requested, with the plain one kept as fast_dct_quantize. */
717     if (!s->dct_quantize)
718         s->dct_quantize = ff_dct_quantize_c;
720         s->denoise_dct = denoise_dct_c;
721     s->fast_dct_quantize = s->dct_quantize;
723         s->dct_quantize = dct_quantize_trellis_c;
725     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
726         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
728     s->quant_precision = 5;
730     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
731     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
733     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
734         ff_h261_encode_init(s);
735     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
736         ff_h263_encode_init(s);
737     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
738         ff_msmpeg4_encode_init(s);
739     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
740         && s->out_format == FMT_MPEG1)
741         ff_mpeg1_encode_init(s);
/* Select default intra/inter matrices per codec family, permuted for the
 * chosen IDCT; user-supplied matrices override the defaults. */
744     for (i = 0; i < 64; i++) {
745         int j = s->dsp.idct_permutation[i];
746         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
748             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
749             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
750         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
752             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
755             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
756             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
758         if (s->avctx->intra_matrix)
759             s->intra_matrix[j] = s->avctx->intra_matrix[i];
760         if (s->avctx->inter_matrix)
761             s->inter_matrix[j] = s->avctx->inter_matrix[i];
764     /* precompute matrix */
765     /* for mjpeg, we do include qscale in the matrix */
766     if (s->out_format != FMT_MJPEG) {
767         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
768                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
770         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
771                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
775     if (ff_rate_control_init(s) < 0)
778 #if FF_API_ERROR_RATE
779     FF_DISABLE_DEPRECATION_WARNINGS
780     if (avctx->error_rate)
781         s->error_rate = avctx->error_rate;
782     FF_ENABLE_DEPRECATION_WARNINGS;
/* b_frame_strategy 2 needs scratch frames (max_b_frames + 2: the anchor
 * plus every candidate B frame) for estimate_best_b_count(). */
785     if (avctx->b_frame_strategy == 2) {
786         for (i = 0; i < s->max_b_frames + 2; i++) {
787             s->tmp_frames[i] = av_frame_alloc();
788             if (!s->tmp_frames[i])
789                 return AVERROR(ENOMEM);
791             s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
792             s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
793             s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
795             ret = av_frame_get_buffer(s->tmp_frames[i], 32);
/* Free everything allocated by ff_MPV_encode_init(): rate control state,
 * the common context, codec-specific (mjpeg) state, extradata, and the
 * b_frame_strategy scratch frames. */
804 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
806     MpegEncContext *s = avctx->priv_data;
809     ff_rate_control_uninit(s);
811     ff_MPV_common_end(s);
812     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
813         s->out_format == FMT_MJPEG)
814         ff_mjpeg_encode_close(s);
816     av_freep(&avctx->extradata);
/* av_frame_free() is NULL-safe, so unconditionally free the whole array. */
818     for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
819         av_frame_free(&s->tmp_frames[i]);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (typically the block mean) — a cheap flatness measure. */
824 static int get_sae(uint8_t *src, int ref, int stride)
829     for (y = 0; y < 16; y++) {
830         for (x = 0; x < 16; x++) {
831             acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to code as intra: a block votes
 * intra when its deviation from its own mean (SAE, plus a 500 margin) is
 * smaller than its SAD against the reference frame. */
838 static int get_intra_count(MpegEncContext *s, uint8_t *src,
839                            uint8_t *ref, int stride)
847     for (y = 0; y < h; y += 16) {
848         for (x = 0; x < w; x += 16) {
849             int offset = x + y * stride;
850             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
852             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
853             int sae  = get_sae(src + offset, mean, stride);
855             acc += sae + 500 < sad;
/* Queue a user frame for encoding: validate/guess its pts, reference or
 * copy its data into an internal Picture, and append it to the
 * input_picture reorder buffer at the encoding-delay position. */
862 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
866     int i, display_picture_number = 0, ret;
867     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
868                                                  (s->low_delay ? 0 : 1);
873     display_picture_number = s->input_picture_number++;
/* pts handling: enforce monotonicity against the last user pts; when the
 * user gave no pts, synthesize one (last + 1, or the display number). */
875     if (pts != AV_NOPTS_VALUE) {
876         if (s->user_specified_pts != AV_NOPTS_VALUE) {
878             int64_t last = s->user_specified_pts;
881                 av_log(s->avctx, AV_LOG_ERROR,
882                        "Error, Invalid timestamp=%"PRId64", "
883                        "last=%"PRId64"\n", pts, s->user_specified_pts);
887             if (!s->low_delay && display_picture_number == 1)
888                 s->dts_delta = time - last;
890         s->user_specified_pts = pts;
892         if (s->user_specified_pts != AV_NOPTS_VALUE) {
893             s->user_specified_pts =
894             pts = s->user_specified_pts + 1;
895             av_log(s->avctx, AV_LOG_INFO,
896                    "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
899             pts = display_picture_number;
/* Decide whether the input buffers can be referenced directly ("direct")
 * or must be copied: strides have to match the internal layout.
 * NOTE(review): the trailing ';' below makes this an empty statement —
 * the controlled statement appears to be missing; confirm against the
 * upstream source. */
905     if (!pic_arg->buf[0]);
907         if (pic_arg->linesize[0] != s->linesize)
909         if (pic_arg->linesize[1] != s->uvlinesize)
911         if (pic_arg->linesize[2] != s->uvlinesize)
914     av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
915             pic_arg->linesize[1], s->linesize, s->uvlinesize);
918         i = ff_find_unused_picture(s, 1);
922         pic = &s->picture[i];
925         if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
927         if (ff_alloc_picture(s, pic, 1) < 0) {
931         i = ff_find_unused_picture(s, 0);
935         pic = &s->picture[i];
938         if (ff_alloc_picture(s, pic, 0) < 0) {
/* If the user's buffers already sit at our INPLACE_OFFSET layout we can
 * skip the copy; otherwise copy plane by plane. */
942         if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
943             pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
944             pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
947             int h_chroma_shift, v_chroma_shift;
948             av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
952             for (i = 0; i < 3; i++) {
953                 int src_stride = pic_arg->linesize[i];
954                 int dst_stride = i ? s->uvlinesize : s->linesize;
955                 int h_shift = i ? h_chroma_shift : 0;
956                 int v_shift = i ? v_chroma_shift : 0;
957                 int w = s->width  >> h_shift;
958                 int h = s->height >> v_shift;
959                 uint8_t *src = pic_arg->data[i];
960                 uint8_t *dst = pic->f.data[i];
962                 if (!s->avctx->rc_buffer_size)
963                     dst += INPLACE_OFFSET;
965                 if (src_stride == dst_stride)
966                     memcpy(dst, src, src_stride * h);
977     ret = av_frame_copy_props(&pic->f, pic_arg);
981     pic->f.display_picture_number = display_picture_number;
982     pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
/* shift buffer entries */
986     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
987         s->input_picture[i - 1] = s->input_picture[i];
989     s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to ref to be skipped entirely:
 * accumulate a per-8x8-block comparison score over all three planes using
 * the metric selected by frame_skip_exp, then test it against the
 * frame-skip threshold and a lambda-scaled factor. */
994 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1000     for (plane = 0; plane < 3; plane++) {
1001         const int stride = p->f.linesize[plane];
/* Luma covers 2x2 blocks per macroblock, chroma 1x1 (4:2:0 layout). */
1002         const int bw = plane ? 1 : 2;
1003         for (y = 0; y < s->mb_height * bw; y++) {
1004             for (x = 0; x < s->mb_width * bw; x++) {
/* Shared (direct) input has no INPLACE_OFFSET edge padding. */
1005                 int off = p->shared ? 0 : 16;
1006                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1007                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1008                 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* frame_skip_exp selects the norm: max, L1, L2, |v|^3, v^4. */
1010                 switch (s->avctx->frame_skip_exp) {
1011                 case 0: score    =  FFMAX(score, v);          break;
1012                 case 1: score   += FFABS(v);                  break;
1013                 case 2: score   += v * v;                     break;
1014                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1015                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1024     if (score64 < s->avctx->frame_skip_threshold)
1026     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with an auxiliary codec context and return the packet
 * size in bits (used by estimate_best_b_count to compare GOP structures).
 * Passing frame == NULL flushes delayed frames from the encoder. */
1031 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1033     AVPacket pkt = { 0 };
1034     int ret, got_output;
1036     av_init_packet(&pkt);
1037     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1042     av_free_packet(&pkt);
/* b_frame_strategy 2: brute-force search for the best number of B frames.
 * Downscales the queued input pictures by brd_scale, encodes each candidate
 * GOP structure (I + j B frames + P, for each j) with a throwaway encoder,
 * and picks the structure with the lowest rate-distortion cost. */
1046 static int estimate_best_b_count(MpegEncContext *s)
1048     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1049     AVCodecContext *c = avcodec_alloc_context3(NULL);
1050     const int scale = s->avctx->brd_scale;
1051     int i, j, out_size, p_lambda, b_lambda, lambda2;
1052     int64_t best_rd  = INT64_MAX;
1053     int best_b_count = -1;
1055     assert(scale >= 0 && scale <= 3);
/* Seed the P/B lambdas from the previous frames of the same type; a
 * missing B lambda falls back to the P lambda. */
1058     //s->next_picture_ptr->quality;
1059     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1060     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1061     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1062     if (!b_lambda) // FIXME we should do this somewhere else
1063         b_lambda = p_lambda;
1064     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the relevant main settings at
 * the reduced resolution. */
1067     c->width        = s->width  >> scale;
1068     c->height       = s->height >> scale;
1069     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1070                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1071     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1072     c->mb_decision  = s->avctx->mb_decision;
1073     c->me_cmp       = s->avctx->me_cmp;
1074     c->mb_cmp       = s->avctx->mb_cmp;
1075     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1076     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1077     c->time_base    = s->avctx->time_base;
1078     c->max_b_frames = s->max_b_frames;
1080     if (avcodec_open2(c, codec, NULL) < 0)
/* Downscale the anchor (next_picture) and each queued input picture into
 * the preallocated tmp_frames. */
1083     for (i = 0; i < s->max_b_frames + 2; i++) {
1084         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1085                                                 s->next_picture_ptr;
1087         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1088             pre_input = *pre_input_ptr;
1090             if (!pre_input.shared && i) {
1091                 pre_input.f.data[0] += INPLACE_OFFSET;
1092                 pre_input.f.data[1] += INPLACE_OFFSET;
1093                 pre_input.f.data[2] += INPLACE_OFFSET;
1096             s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
1097                                  pre_input.f.data[0], pre_input.f.linesize[0],
1098                                  c->width, c->height);
1099             s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
1100                                  pre_input.f.data[1], pre_input.f.linesize[1],
1101                                  c->width >> 1, c->height >> 1);
1102             s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
1103                                  pre_input.f.data[2], pre_input.f.linesize[2],
1104                                  c->width >> 1, c->height >> 1);
/* Try each candidate B-frame count j and accumulate its RD cost. */
1108     for (j = 0; j < s->max_b_frames + 1; j++) {
1111         if (!s->input_picture[j])
1114         c->error[0] = c->error[1] = c->error[2] = 0;
1116         s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1117         s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;
1119         out_size = encode_frame(c, s->tmp_frames[0]);
1121         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1123         for (i = 0; i < s->max_b_frames + 1; i++) {
/* Frame i is a P when it closes a run of j B frames (or is the last). */
1124             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1126             s->tmp_frames[i + 1]->pict_type = is_p ?
1127                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1128             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
1130             out_size = encode_frame(c, s->tmp_frames[i + 1]);
1132             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1135         /* get the delayed frames */
1137             out_size = encode_frame(c, NULL);
1138             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the distortion term (accumulated PSNR error) to the rate term. */
1141         rd += c->error[0] + c->error[1] + c->error[2];
1152     return best_b_count;
/* Pick the next picture to encode: shift the reorder queue, decide the
 * coded picture type (I / P / B run) per the configured b_frame_strategy
 * and GOP constraints, then promote the chosen picture to new_picture /
 * current_picture (copying out of shared input buffers when needed). */
1155 static int select_input_picture(MpegEncContext *s)
1159     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1160         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1161     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1163     /* set next picture type & ordering */
1164     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
/* No reference yet (or intra-only codec): code an I frame immediately. */
1165         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1166             s->next_picture_ptr == NULL || s->intra_only) {
1167             s->reordered_input_picture[0] = s->input_picture[0];
1168             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1169             s->reordered_input_picture[0]->f.coded_picture_number =
1170                 s->coded_picture_number++;
/* Frame-skip path: drop the input picture when skip_check() says it is
 * close enough to the previous reference. */
1174             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1175                 if (s->picture_in_gop_number < s->gop_size &&
1176                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1177                     // FIXME check that te gop check above is +-1 correct
1178                     av_frame_unref(&s->input_picture[0]->f);
1181                     ff_vbv_update(s, 0);
/* Two-pass: take picture types from the first-pass rate-control log. */
1187             if (s->flags & CODEC_FLAG_PASS2) {
1188                 for (i = 0; i < s->max_b_frames + 1; i++) {
1189                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1191                     if (pict_num >= s->rc_context.num_entries)
1193                     if (!s->input_picture[i]) {
1194                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1198                     s->input_picture[i]->f.pict_type =
1199                         s->rc_context.entry[pict_num].new_pict_type;
/* Choose the length of the B-frame run per b_frame_strategy:
 * 0 = always max_b_frames, 1 = intra-count heuristic, 2 = brute force. */
1203             if (s->avctx->b_frame_strategy == 0) {
1204                 b_frames = s->max_b_frames;
1205                 while (b_frames && !s->input_picture[b_frames])
1207             } else if (s->avctx->b_frame_strategy == 1) {
1208                 for (i = 1; i < s->max_b_frames + 1; i++) {
1209                     if (s->input_picture[i] &&
1210                         s->input_picture[i]->b_frame_score == 0) {
1211                         s->input_picture[i]->b_frame_score =
1213                                 s->input_picture[i    ]->f.data[0],
1214                                 s->input_picture[i - 1]->f.data[0],
1218                 for (i = 0; i < s->max_b_frames + 1; i++) {
1219                     if (s->input_picture[i] == NULL ||
1220                         s->input_picture[i]->b_frame_score - 1 >
1221                             s->mb_num / s->avctx->b_sensitivity)
1225                 b_frames = FFMAX(0, i - 1);
/* Reset cached scores so the next decision recomputes them. */
1228                 for (i = 0; i < b_frames + 1; i++) {
1229                     s->input_picture[i]->b_frame_score = 0;
1231             } else if (s->avctx->b_frame_strategy == 2) {
1232                 b_frames = estimate_best_b_count(s);
1234                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* Respect user-forced picture types inside the candidate B run. */
1240             for (i = b_frames - 1; i >= 0; i--) {
1241                 int type = s->input_picture[i]->f.pict_type;
1242                 if (type && type != AV_PICTURE_TYPE_B)
1245             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1246                 b_frames == s->max_b_frames) {
1247                 av_log(s->avctx, AV_LOG_ERROR,
1248                        "warning, too many b frames in a row\n");
/* GOP boundary handling: force an I frame at the configured interval. */
1251             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1252                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1253                     s->gop_size > s->picture_in_gop_number) {
1254                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1256                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1258                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1262             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1263                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* Reorder: the anchor (I/P) is coded first, then the B run. */
1266             s->reordered_input_picture[0] = s->input_picture[b_frames];
1267             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1268                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1269             s->reordered_input_picture[0]->f.coded_picture_number =
1270                 s->coded_picture_number++;
1271             for (i = 0; i < b_frames; i++) {
1272                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1273                 s->reordered_input_picture[i + 1]->f.pict_type =
1275                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1276                     s->coded_picture_number++;
1281     if (s->reordered_input_picture[0]) {
/* Non-B pictures become references (reference = 3). */
1282         s->reordered_input_picture[0]->reference =
1283            s->reordered_input_picture[0]->f.pict_type !=
1284                AV_PICTURE_TYPE_B ? 3 : 0;
1286         ff_mpeg_unref_picture(s, &s->new_picture);
1287         if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1290         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1291             // input is a shared pix, so we can't modifiy it -> alloc a new
1292             // one & ensure that the shared one is reuseable
1295             int i = ff_find_unused_picture(s, 0);
1298             pic = &s->picture[i];
1300             pic->reference = s->reordered_input_picture[0]->reference;
1301             if (ff_alloc_picture(s, pic, 0) < 0) {
1305             ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1309             /* mark us unused / free shared pic */
1310             av_frame_unref(&s->reordered_input_picture[0]->f);
1311             s->reordered_input_picture[0]->shared = 0;
1313             s->current_picture_ptr = pic;
1315             // input is not a shared pix -> reuse buffer for current_pix
1316             s->current_picture_ptr = s->reordered_input_picture[0];
1317             for (i = 0; i < 4; i++) {
1318                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1321         ff_mpeg_unref_picture(s, &s->current_picture);
1322         if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1323                                        s->current_picture_ptr)) < 0)
1326         s->picture_number = s->new_picture.f.display_picture_number;
1328 ff_mpeg_unref_picture(s, &s->new_picture);
/* NOTE(review): this extract is missing interior lines (visible as gaps in the
 * embedded original line numbers), so several if/else bodies and the final
 * return are truncated here. Comments below describe only what is visible. */
/**
 * Encode one frame.
 * Loads the input picture, selects/reorders it (B-frame handling), runs the
 * actual encoder, updates rate-control / VBV state, optionally pads with
 * stuffing bytes, patches the MPEG-1/2 vbv_delay field for CBR, and fills
 * the output packet (pts/dts, key-frame flag, size).
 */
1333 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1334 const AVFrame *pic_arg, int *got_packet)
1336 MpegEncContext *s = avctx->priv_data;
1337 int i, stuffing_count, ret;
1338 int context_count = s->slice_context_count;
1340 s->picture_in_gop_number++;
1342 if (load_input_picture(s, pic_arg) < 0)
1345 if (select_input_picture(s) < 0) {
/* output? new_picture.f.data[0] is non-NULL iff there is a frame to encode */
1350 if (s->new_picture.f.data[0]) {
1352 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* H.263 macroblock-info side data: 12 bytes per MB (see write_mb_info()) */
1355 s->mb_info_ptr = av_packet_new_side_data(pkt,
1356 AV_PKT_DATA_H263_MB_INFO,
1357 s->mb_width*s->mb_height*12);
1358 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* split the packet buffer between slice threads proportionally to the
 * number of MB rows each thread encodes */
1361 for (i = 0; i < context_count; i++) {
1362 int start_y = s->thread_context[i]->start_mb_y;
1363 int end_y = s->thread_context[i]-> end_mb_y;
1364 int h = s->mb_height;
1365 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1366 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1368 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1371 s->pict_type = s->new_picture.f.pict_type;
1373 ff_MPV_frame_start(s, avctx);
1375 if (encode_picture(s, s->picture_number) < 0)
/* export per-frame bit-usage statistics to the AVCodecContext */
1378 avctx->header_bits = s->header_bits;
1379 avctx->mv_bits = s->mv_bits;
1380 avctx->misc_bits = s->misc_bits;
1381 avctx->i_tex_bits = s->i_tex_bits;
1382 avctx->p_tex_bits = s->p_tex_bits;
1383 avctx->i_count = s->i_count;
1384 // FIXME f/b_count in avctx
1385 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1386 avctx->skip_count = s->skip_count;
1388 ff_MPV_frame_end(s);
1390 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1391 ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow check: if the frame is too big, raise lambda and
 * (in the lines missing from this extract) retry the encode */
1393 if (avctx->rc_buffer_size) {
1394 RateControlContext *rcc = &s->rc_context;
1395 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1397 if (put_bits_count(&s->pb) > max_size &&
1398 s->lambda < s->avctx->lmax) {
1399 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1400 (s->qscale + 1) / s->qscale);
1401 if (s->adaptive_quant) {
1403 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1404 s->lambda_table[i] =
1405 FFMAX(s->lambda_table[i] + 1,
1406 s->lambda_table[i] * (s->qscale + 1) /
1409 s->mb_skipped = 0; // done in MPV_frame_start()
1410 // done in encode_picture() so we must undo it
1411 if (s->pict_type == AV_PICTURE_TYPE_P) {
1412 if (s->flipflop_rounding ||
1413 s->codec_id == AV_CODEC_ID_H263P ||
1414 s->codec_id == AV_CODEC_ID_MPEG4)
1415 s->no_rounding ^= 1;
1417 if (s->pict_type != AV_PICTURE_TYPE_B) {
1418 s->time_base = s->last_time_base;
1419 s->last_non_b_time = s->time - s->pp_time;
/* rewind the per-thread bitstream writers before the retry */
1421 for (i = 0; i < context_count; i++) {
1422 PutBitContext *pb = &s->thread_context[i]->pb;
1423 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1428 assert(s->avctx->rc_max_rate);
1431 if (s->flags & CODEC_FLAG_PASS1)
1432 ff_write_pass1_stats(s);
1434 for (i = 0; i < 4; i++) {
1435 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1436 avctx->error[i] += s->current_picture_ptr->f.error[i];
1439 if (s->flags & CODEC_FLAG_PASS1)
1440 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1441 avctx->i_tex_bits + avctx->p_tex_bits ==
1442 put_bits_count(&s->pb));
1443 flush_put_bits(&s->pb);
1444 s->frame_bits = put_bits_count(&s->pb);
/* rate control may request stuffing bytes to keep the VBV buffer from
 * overflowing; emit them in the codec-specific format */
1446 stuffing_count = ff_vbv_update(s, s->frame_bits);
1447 if (stuffing_count) {
1448 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1449 stuffing_count + 50) {
1450 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1454 switch (s->codec_id) {
1455 case AV_CODEC_ID_MPEG1VIDEO:
1456 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: zero bytes are legal stuffing before the next start code */
1457 while (stuffing_count--) {
1458 put_bits(&s->pb, 8, 0);
1461 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing_start_code (0x000001C3) followed by 0xFF bytes */
1462 put_bits(&s->pb, 16, 0);
1463 put_bits(&s->pb, 16, 0x1C3);
1464 stuffing_count -= 4;
1465 while (stuffing_count--) {
1466 put_bits(&s->pb, 8, 0xFF);
1470 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1472 flush_put_bits(&s->pb);
1473 s->frame_bits = put_bits_count(&s->pb);
1476 /* update mpeg1/2 vbv_delay for CBR */
1477 if (s->avctx->rc_max_rate &&
1478 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1479 s->out_format == FMT_MPEG1 &&
1480 90000LL * (avctx->rc_buffer_size - 1) <=
1481 s->avctx->rc_max_rate * 0xFFFFLL) {
1482 int vbv_delay, min_delay;
1483 double inbits = s->avctx->rc_max_rate *
1484 av_q2d(s->avctx->time_base);
1485 int minbits = s->frame_bits - 8 *
1486 (s->vbv_delay_ptr - s->pb.buf - 1);
1487 double bits = s->rc_context.buffer_index + minbits - inbits;
1490 av_log(s->avctx, AV_LOG_ERROR,
1491 "Internal error, negative bits\n");
1493 assert(s->repeat_first_field == 0);
/* vbv_delay is in 90 kHz clock ticks (16-bit field in the picture header) */
1495 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1496 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1497 s->avctx->rc_max_rate;
1499 vbv_delay = FFMAX(vbv_delay, min_delay);
1501 assert(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay value into the already-written header bytes;
 * the field straddles three bytes, hence the mask/shift dance */
1503 s->vbv_delay_ptr[0] &= 0xF8;
1504 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1505 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1506 s->vbv_delay_ptr[2] &= 0x07;
1507 s->vbv_delay_ptr[2] |= vbv_delay << 3;
/* 27 MHz units for AVCodecContext.vbv_delay (90 kHz * 300) */
1508 avctx->vbv_delay = vbv_delay * 300;
1510 s->total_bits += s->frame_bits;
1511 avctx->frame_bits = s->frame_bits;
1513 pkt->pts = s->current_picture.f.pts;
1514 if (!s->low_delay) {
/* first coded picture: no previous reordered pts, derive dts from delta */
1515 if (!s->current_picture.f.coded_picture_number)
1516 pkt->dts = pkt->pts - s->dts_delta;
1518 pkt->dts = s->reordered_pts;
1519 s->reordered_pts = s->input_picture[0]->f.pts;
1521 pkt->dts = pkt->pts;
1522 if (s->current_picture.f.key_frame)
1523 pkt->flags |= AV_PKT_FLAG_KEY;
1525 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1529 assert((s->frame_bits & 7) == 0);
1531 pkt->size = s->frame_bits / 8;
1532 *got_packet = !!pkt->size;
/* NOTE(review): interior lines are missing from this extract (score
 * accumulation and early returns are truncated). */
/**
 * Zero out block n entirely if it contains only a few small coefficients.
 * Each nonzero |level|==1 coefficient contributes a weight from tab[] based
 * on its scan position (low-frequency coefficients weigh more); any
 * |level|>1 coefficient aborts elimination. If the total score stays below
 * the threshold, all AC (and optionally DC) coefficients are cleared.
 * A negative threshold means "keep the DC coefficient" (skip_dc mode).
 */
1536 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1537 int n, int threshold)
1539 static const char tab[64] = {
1540 3, 2, 2, 1, 1, 1, 1, 1,
1541 1, 1, 1, 1, 1, 1, 1, 1,
1542 1, 1, 1, 1, 1, 1, 1, 1,
1543 0, 0, 0, 0, 0, 0, 0, 0,
1544 0, 0, 0, 0, 0, 0, 0, 0,
1545 0, 0, 0, 0, 0, 0, 0, 0,
1546 0, 0, 0, 0, 0, 0, 0, 0,
1547 0, 0, 0, 0, 0, 0, 0, 0
1552 int16_t *block = s->block[n];
1553 const int last_index = s->block_last_index[n];
1556 if (threshold < 0) {
1558 threshold = -threshold;
1562 /* Are all we could set to zero already zero? */
1563 if (last_index <= skip_dc - 1)
/* accumulate the elimination score over the coefficients in scan order */
1566 for (i = 0; i <= last_index; i++) {
1567 const int j = s->intra_scantable.permutated[i];
1568 const int level = FFABS(block[j]);
1570 if (skip_dc && i == 0)
1574 } else if (level > 1) {
/* a large coefficient: block is worth keeping, bail out (body truncated) */
1580 if (score >= threshold)
/* clear the coefficients (assignment line missing from this extract) */
1582 for (i = skip_dc; i <= last_index; i++) {
1583 const int j = s->intra_scantable.permutated[i];
/* last_index 0 keeps the DC; -1 marks the block as fully empty */
1587 s->block_last_index[n] = 0;
1589 s->block_last_index[n] = -1;
/* NOTE(review): lines are missing from this extract (the clamping
 * assignments and the overflow counter increments are truncated). */
/**
 * Clamp quantized coefficients to the codec's legal range
 * [s->min_qcoeff, s->max_qcoeff], counting how many overflowed.
 * The intra DC coefficient (index 0) is never clipped.
 * Logs a warning when clipping occurred in simple MB-decision mode.
 */
1592 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1596 const int maxlevel = s->max_qcoeff;
1597 const int minlevel = s->min_qcoeff;
1601 i = 1; // skip clipping of intra dc
1605 for (; i <= last_index; i++) {
1606 const int j = s->intra_scantable.permutated[i];
1607 int level = block[j];
1609 if (level > maxlevel) {
1612 } else if (level < minlevel) {
1620 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1621 av_log(s->avctx, AV_LOG_INFO,
1622 "warning, clipping %d dct coefficients to %d..%d\n",
1623 overflow, minlevel, maxlevel);
/* NOTE(review): lines are missing from this extract (the count/sum/sqr
 * accumulator declarations and updates are truncated). */
/**
 * Compute a per-pixel visual weight for an 8x8 block, used by the
 * quantizer noise shaping. For each pixel, the variance of its 3x3
 * neighborhood (clipped to the block border) is estimated and converted
 * into a weight: flat areas get high weights (errors are more visible),
 * textured areas get low ones.
 */
1626 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1630 for (y = 0; y < 8; y++) {
1631 for (x = 0; x < 8; x++) {
/* 3x3 neighborhood clamped to the 8x8 block */
1637 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1638 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1639 int v = ptr[x2 + y2 * stride];
/* 36 * sqrt(variance-like term); weight stored in raster order */
1645 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/* NOTE(review): this extract is missing many interior lines (declarations,
 * several branch bodies, skip_dct assignments). Comments describe only what
 * is visible. */
/**
 * Encode one macroblock. The full pipeline visible here:
 *   1. adaptive quantization: pick per-MB qscale/dquant from lambda_table,
 *      with codec-specific dquant restrictions (H.263 clip, MPEG-4 B/8x8);
 *   2. fetch source pixels (with edge emulation at picture borders);
 *   3. intra path: interlaced-DCT decision, then get_pixels into s->block[];
 *      inter path: run motion compensation into a scratch destination,
 *      interlaced-DCT decision, then diff_pixels (residual);
 *   4. pre-quantization skip: small-SAD blocks are marked skip_dct;
 *   5. optional quantizer noise shaping (visual weights, refinement);
 *   6. DCT + quantize each block, clip coefficients;
 *   7. single-coefficient elimination for luma/chroma;
 *   8. gray-only mode forces empty/flat chroma blocks;
 *   9. fix block_last_index for alternate scan with non-C quantizers;
 *  10. dispatch to the codec-specific MB bitstream writer.
 * mb_block_height / mb_block_count distinguish 4:2:0 (8, 6) from
 * 4:2:2 (16, 8) layouts — see encode_mb().
 */
1650 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1651 int motion_x, int motion_y,
1652 int mb_block_height,
1655 int16_t weight[8][64];
1656 int16_t orig[8][64];
1657 const int mb_x = s->mb_x;
1658 const int mb_y = s->mb_y;
1661 int dct_offset = s->linesize * 8; // default for progressive frames
1662 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1663 ptrdiff_t wrap_y, wrap_c;
1665 for (i = 0; i < mb_block_count; i++)
1666 skip_dct[i] = s->skipdct;
/* --- 1. adaptive quantization ------------------------------------- */
1668 if (s->adaptive_quant) {
1669 const int last_qp = s->qscale;
1670 const int mb_xy = mb_x + mb_y * s->mb_stride;
1672 s->lambda = s->lambda_table[mb_xy];
1675 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1676 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1677 s->dquant = s->qscale - last_qp;
1679 if (s->out_format == FMT_H263) {
/* H.263 only allows DQUANT in [-2, 2] */
1680 s->dquant = av_clip(s->dquant, -2, 2);
1682 if (s->codec_id == AV_CODEC_ID_MPEG4) {
/* MPEG-4 restrictions: no odd dquant / direct-mode change in B frames,
 * no dquant with 8x8 MVs (branch bodies truncated in this extract) */
1684 if (s->pict_type == AV_PICTURE_TYPE_B) {
1685 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1688 if (s->mv_type == MV_TYPE_8X8)
1694 ff_set_qscale(s, last_qp + s->dquant);
1695 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1696 ff_set_qscale(s, s->qscale + s->dquant);
/* --- 2. source pixel pointers (with border emulation) ------------- */
1698 wrap_y = s->linesize;
1699 wrap_c = s->uvlinesize;
1700 ptr_y = s->new_picture.f.data[0] +
1701 (mb_y * 16 * wrap_y) + mb_x * 16;
1702 ptr_cb = s->new_picture.f.data[1] +
1703 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1704 ptr_cr = s->new_picture.f.data[2] +
1705 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1707 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
/* MB sticks out of the picture: copy into the edge-emulation buffer */
1708 uint8_t *ebuf = s->edge_emu_buffer + 32;
1709 s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1711 16, 16, mb_x * 16, mb_y * 16,
1712 s->width, s->height);
1714 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1716 8, mb_block_height, mb_x * 8, mb_y * 8,
1717 s->width >> 1, s->height >> 1);
1718 ptr_cb = ebuf + 18 * wrap_y;
1719 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
1721 8, mb_block_height, mb_x * 8, mb_y * 8,
1722 s->width >> 1, s->height >> 1);
1723 ptr_cr = ebuf + 18 * wrap_y + 8;
/* --- 3a. intra path: interlaced-DCT decision on source pixels ------ */
1727 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1728 int progressive_score, interlaced_score;
1730 s->interlaced_dct = 0;
1731 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1733 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1734 NULL, wrap_y, 8) - 400;
1736 if (progressive_score > 0) {
1737 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1738 NULL, wrap_y * 2, 8) +
1739 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1740 NULL, wrap_y * 2, 8);
1741 if (progressive_score > interlaced_score) {
1742 s->interlaced_dct = 1;
/* field DCT: second luma row pair starts one line below, not 8 */
1744 dct_offset = wrap_y;
1746 if (s->chroma_format == CHROMA_422)
/* intra: copy pixels straight into the DCT blocks */
1752 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1753 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1754 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1755 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1757 if (s->flags & CODEC_FLAG_GRAY) {
1761 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1762 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1763 if (!s->chroma_y_shift) { /* 422 */
1764 s->dsp.get_pixels(s->block[6],
1765 ptr_cb + (dct_offset >> 1), wrap_c);
1766 s->dsp.get_pixels(s->block[7],
1767 ptr_cr + (dct_offset >> 1), wrap_c);
/* --- 3b. inter path: motion compensation then residual ------------- */
1771 op_pixels_func (*op_pix)[4];
1772 qpel_mc_func (*op_qpix)[16];
1773 uint8_t *dest_y, *dest_cb, *dest_cr;
1775 dest_y = s->dest[0];
1776 dest_cb = s->dest[1];
1777 dest_cr = s->dest[2];
1779 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1780 op_pix = s->hdsp.put_pixels_tab;
1781 op_qpix = s->dsp.put_qpel_pixels_tab;
1783 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1784 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1787 if (s->mv_dir & MV_DIR_FORWARD) {
1788 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1789 s->last_picture.f.data,
/* after forward MC, backward prediction is averaged on top */
1791 op_pix = s->hdsp.avg_pixels_tab;
1792 op_qpix = s->dsp.avg_qpel_pixels_tab;
1794 if (s->mv_dir & MV_DIR_BACKWARD) {
1795 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1796 s->next_picture.f.data,
/* interlaced-DCT decision on the prediction error this time */
1800 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1801 int progressive_score, interlaced_score;
1803 s->interlaced_dct = 0;
1804 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1807 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1808 ptr_y + wrap_y * 8, wrap_y,
1811 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1812 progressive_score -= 400;
1814 if (progressive_score > 0) {
1815 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1818 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1822 if (progressive_score > interlaced_score) {
1823 s->interlaced_dct = 1;
1825 dct_offset = wrap_y;
1827 if (s->chroma_format == CHROMA_422)
/* residual = source - prediction */
1833 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1834 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1835 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1836 dest_y + dct_offset, wrap_y);
1837 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1838 dest_y + dct_offset + 8, wrap_y);
1840 if (s->flags & CODEC_FLAG_GRAY) {
1844 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1845 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1846 if (!s->chroma_y_shift) { /* 422 */
1847 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1848 dest_cb + (dct_offset >> 1), wrap_c);
1849 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1850 dest_cr + (dct_offset >> 1), wrap_c);
1853 /* pre quantization */
/* --- 4. cheap skip test: low-SAD blocks are marked skip_dct
 * (the skip_dct[i]=1 assignments are truncated in this extract) */
1854 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1855 2 * s->qscale * s->qscale) {
1857 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1858 wrap_y, 8) < 20 * s->qscale)
1860 if (s->dsp.sad[1](NULL, ptr_y + 8,
1861 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1863 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1864 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1866 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1867 dest_y + dct_offset + 8,
1868 wrap_y, 8) < 20 * s->qscale)
1870 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1871 wrap_c, 8) < 20 * s->qscale)
1873 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1874 wrap_c, 8) < 20 * s->qscale)
1876 if (!s->chroma_y_shift) { /* 422 */
1877 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1878 dest_cb + (dct_offset >> 1),
1879 wrap_c, 8) < 20 * s->qscale)
1881 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1882 dest_cr + (dct_offset >> 1),
1883 wrap_c, 8) < 20 * s->qscale)
/* --- 5. noise shaping: visual weights + pristine copy of the blocks */
1889 if (s->quantizer_noise_shaping) {
1891 get_visual_weight(weight[0], ptr_y , wrap_y);
1893 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1895 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1897 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1899 get_visual_weight(weight[4], ptr_cb , wrap_c);
1901 get_visual_weight(weight[5], ptr_cr , wrap_c);
1902 if (!s->chroma_y_shift) { /* 422 */
1904 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1907 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1910 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1913 /* DCT & quantize */
1914 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1916 for (i = 0; i < mb_block_count; i++) {
1919 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1920 // FIXME we could decide to change to quantizer instead of
1922 // JS: I don't think that would be a good idea it could lower
1923 // quality instead of improve it. Just INTRADC clipping
1924 // deserves changes in quantizer
1926 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1928 s->block_last_index[i] = -1;
1930 if (s->quantizer_noise_shaping) {
1931 for (i = 0; i < mb_block_count; i++) {
1933 s->block_last_index[i] =
1934 dct_quantize_refine(s, s->block[i], weight[i],
1935 orig[i], i, s->qscale);
/* --- 7. throw away nearly-empty inter blocks ----------------------- */
1940 if (s->luma_elim_threshold && !s->mb_intra)
1941 for (i = 0; i < 4; i++)
1942 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1943 if (s->chroma_elim_threshold && !s->mb_intra)
1944 for (i = 4; i < mb_block_count; i++)
1945 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1947 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1948 for (i = 0; i < mb_block_count; i++) {
1949 if (s->block_last_index[i] == -1)
1950 s->coded_score[i] = INT_MAX / 256;
/* --- 8. gray mode: force mid-gray DC-only chroma on intra MBs ------ */
1955 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1956 s->block_last_index[4] =
1957 s->block_last_index[5] = 0;
1959 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1962 // non c quantize code returns incorrect block_last_index FIXME
1963 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1964 for (i = 0; i < mb_block_count; i++) {
1966 if (s->block_last_index[i] > 0) {
1967 for (j = 63; j > 0; j--) {
1968 if (s->block[i][s->intra_scantable.permutated[j]])
1971 s->block_last_index[i] = j;
1976 /* huffman encode */
1977 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1978 case AV_CODEC_ID_MPEG1VIDEO:
1979 case AV_CODEC_ID_MPEG2VIDEO:
1980 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1981 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1983 case AV_CODEC_ID_MPEG4:
1984 if (CONFIG_MPEG4_ENCODER)
1985 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1987 case AV_CODEC_ID_MSMPEG4V2:
1988 case AV_CODEC_ID_MSMPEG4V3:
1989 case AV_CODEC_ID_WMV1:
1990 if (CONFIG_MSMPEG4_ENCODER)
1991 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1993 case AV_CODEC_ID_WMV2:
1994 if (CONFIG_WMV2_ENCODER)
1995 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1997 case AV_CODEC_ID_H261:
1998 if (CONFIG_H261_ENCODER)
1999 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2001 case AV_CODEC_ID_H263:
2002 case AV_CODEC_ID_H263P:
2003 case AV_CODEC_ID_FLV1:
2004 case AV_CODEC_ID_RV10:
2005 case AV_CODEC_ID_RV20:
2006 if (CONFIG_H263_ENCODER)
2007 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2009 case AV_CODEC_ID_MJPEG:
2010 if (CONFIG_MJPEG_ENCODER)
2011 ff_mjpeg_encode_mb(s, s->block);
/**
 * Thin dispatcher: encode one macroblock via encode_mb_internal() with the
 * layout for the current chroma format — 4:2:0 uses 8-line chroma blocks
 * and 6 blocks per MB, otherwise 16-line chroma and 8 blocks (4:2:2).
 */
2018 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2020 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2021 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/* NOTE(review): lines are missing from this extract (the loop headers for
 * the last_dc/block_last_index copies and the PutBitContext save). */
/**
 * Snapshot the encoder state that an MB-encoding trial can modify, so the
 * RD mb-type search (encode_mb_hq) can restore it before each attempt:
 * motion-vector predictors, DC predictors, bit-usage counters, qscale.
 */
2024 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2027 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2030 d->mb_skip_run= s->mb_skip_run;
2032 d->last_dc[i] = s->last_dc[i];
/* statistics */
2035 d->mv_bits= s->mv_bits;
2036 d->i_tex_bits= s->i_tex_bits;
2037 d->p_tex_bits= s->p_tex_bits;
2038 d->i_count= s->i_count;
2039 d->f_count= s->f_count;
2040 d->b_count= s->b_count;
2041 d->skip_count= s->skip_count;
2042 d->misc_bits= s->misc_bits;
2046 d->qscale= s->qscale;
2047 d->dquant= s->dquant;
/* MPEG-4 escape-code length state */
2049 d->esc3_level_length= s->esc3_level_length;
/* NOTE(review): lines are missing from this extract (loop headers and the
 * PutBitContext copy). */
/**
 * Counterpart of copy_context_before_encode(): after a trial encode won the
 * RD comparison, copy the resulting encoder state (including the chosen MVs,
 * mb_intra/mv_type/mv_dir, block indexes and bit counters) into the "best"
 * context so it can later be committed.
 */
2052 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* after-encode also keeps the actual MVs chosen by the trial */
2055 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2056 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2059 d->mb_skip_run= s->mb_skip_run;
2061 d->last_dc[i] = s->last_dc[i];
/* statistics */
2064 d->mv_bits= s->mv_bits;
2065 d->i_tex_bits= s->i_tex_bits;
2066 d->p_tex_bits= s->p_tex_bits;
2067 d->i_count= s->i_count;
2068 d->f_count= s->f_count;
2069 d->b_count= s->b_count;
2070 d->skip_count= s->skip_count;
2071 d->misc_bits= s->misc_bits;
2073 d->mb_intra= s->mb_intra;
2074 d->mb_skipped= s->mb_skipped;
2075 d->mv_type= s->mv_type;
2076 d->mv_dir= s->mv_dir;
2078 if(s->data_partitioning){
2080 d->tex_pb= s->tex_pb;
2084 d->block_last_index[i]= s->block_last_index[i];
2085 d->interlaced_dct= s->interlaced_dct;
2086 d->qscale= s->qscale;
2088 d->esc3_level_length= s->esc3_level_length;
/* NOTE(review): lines are missing from this extract (the score comparison
 * against *dmin and the next_block toggle are truncated). */
/**
 * Trial-encode one macroblock with a given candidate MB type for the RD
 * mb-type decision. The MB is written into a scratch double-buffered
 * PutBitContext (pb/pb2/tex_pb[*next_block]) and, in full-RD mode, also
 * decoded into rd_scratchpad so an SSE distortion term can be added.
 * Score = bits (times lambda2 plus SSE in RD mode); the best context is
 * kept via copy_context_after_encode().
 */
2091 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2092 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2093 int *dmin, int *next_block, int motion_x, int motion_y)
2096 uint8_t *dest_backup[3];
2098 copy_context_before_encode(s, backup, type);
2100 s->block= s->blocks[*next_block];
2101 s->pb= pb[*next_block];
2102 if(s->data_partitioning){
2103 s->pb2 = pb2 [*next_block];
2104 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the scratchpad so the real picture is
 * untouched while trying this MB type */
2108 memcpy(dest_backup, s->dest, sizeof(s->dest));
2109 s->dest[0] = s->rd_scratchpad;
2110 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2111 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2112 assert(s->linesize >= 32); //FIXME
2115 encode_mb(s, motion_x, motion_y);
2117 score= put_bits_count(&s->pb);
2118 if(s->data_partitioning){
2119 score+= put_bits_count(&s->pb2);
2120 score+= put_bits_count(&s->tex_pb);
2123 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2124 ff_MPV_decode_mb(s, s->block);
/* rate-distortion: bits * lambda2 + SSE (in lambda fixed-point) */
2126 score *= s->lambda2;
2127 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2131 memcpy(s->dest, dest_backup, sizeof(s->dest));
2138 copy_context_after_encode(best, s, type);
/* NOTE(review): lines are missing from this extract (loop headers,
 * accumulator declaration and final return). */
/**
 * Sum of squared errors between two pixel rectangles.
 * Fast DSP paths for the common 16x16 and 8x8 sizes; otherwise a generic
 * per-pixel loop using the shared square table (offset by 256 so negative
 * differences index correctly).
 */
2142 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2143 uint32_t *sq = ff_squareTbl + 256;
2148 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2149 else if(w==8 && h==8)
2150 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2154 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * Distortion of the current macroblock: compare the reconstructed MB
 * (s->dest) against the source (s->new_picture) over luma + both chroma
 * planes. Full-size MBs use the DSP sse/nsse functions (NSSE when the
 * mb_cmp option asks for noise-preserving SSE); partial edge MBs fall back
 * to the generic sse() with the clipped width/height.
 */
2163 static int sse_mb(MpegEncContext *s){
/* clip MB extent at the right/bottom picture border */
2167 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2168 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2171 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2172 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2173 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2174 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2176 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2177 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2178 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* partial MB at the border: generic path, chroma at half resolution */
2181 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2182 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2183 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Slice-thread worker: coarse pre-pass motion estimation over this thread's
 * MB rows, iterating bottom-up / right-to-left, using the pre_dia_size
 * diamond. Runs ff_pre_estimate_p_frame_motion() per MB.
 */
2186 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2187 MpegEncContext *s= *(void**)arg;
2191 s->me.dia_size= s->avctx->pre_dia_size;
2192 s->first_slice_line=1;
/* reverse scan order (bottom-up) for the pre pass */
2193 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2194 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2195 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2197 s->first_slice_line=0;
/**
 * Slice-thread worker: full motion estimation for this thread's MB rows.
 * Maintains block_index[] incrementally across the row and dispatches to
 * the B-frame or P-frame estimator depending on the picture type; the
 * resulting MVs and mb_type are stored in the context for encode_thread().
 */
2205 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2206 MpegEncContext *s= *(void**)arg;
2208 ff_check_alignment();
2210 s->me.dia_size= s->avctx->dia_size;
2211 s->first_slice_line=1;
2212 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2213 s->mb_x=0; //for block init below
2214 ff_init_block_index(s);
2215 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the 4 luma block indexes by one MB (2 blocks wide) */
2216 s->block_index[0]+=2;
2217 s->block_index[1]+=2;
2218 s->block_index[2]+=2;
2219 s->block_index[3]+=2;
2221 /* compute motion vector & mb_type and store in context */
2222 if(s->pict_type==AV_PICTURE_TYPE_B)
2223 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2225 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2227 s->first_slice_line=0;
/* NOTE(review): the xx/yy coordinate computations are truncated in this
 * extract. */
/**
 * Slice-thread worker: per-MB luma variance and mean of the source picture,
 * used by the rate control / adaptive quantization. Accumulates the total
 * variance in me.mb_var_sum_temp.
 */
2232 static int mb_var_thread(AVCodecContext *c, void *arg){
2233 MpegEncContext *s= *(void**)arg;
2236 ff_check_alignment();
2238 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2239 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2242 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2244 int sum = s->dsp.pix_sum(pix, s->linesize);
/* variance ~ E[x^2] - E[x]^2 over the 16x16 MB, with rounding bias */
2246 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2248 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2249 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2250 s->me.mb_var_sum_temp += varc;
/**
 * Finish the current slice bitstream: merge MPEG-4 data partitions if used,
 * emit codec-specific stuffing (MPEG-4 / MJPEG), byte-align and flush the
 * writer, and charge the alignment bits to misc_bits for pass-1 stats.
 */
2256 static void write_slice_end(MpegEncContext *s){
2257 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2258 if(s->partitioned_frame){
2259 ff_mpeg4_merge_partitions(s);
2262 ff_mpeg4_stuffing(&s->pb);
2263 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2264 ff_mjpeg_encode_stuffing(&s->pb);
2267 avpriv_align_put_bits(&s->pb);
2268 flush_put_bits(&s->pb);
2270 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2271 s->misc_bits+= get_bits_diff(s);
/**
 * Append one 12-byte record to the AV_PKT_DATA_H263_MB_INFO side data:
 * bit offset of the MB in the stream, qscale, GOB number, MB address
 * within the GOB, and the H.263 motion-vector predictor (hmv1/vmv1);
 * the second MV pair is zeroed since 4MV is not implemented.
 */
2274 static void write_mb_info(MpegEncContext *s)
/* ptr points at the slot reserved by update_mb_info() */
2276 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2277 int offset = put_bits_count(&s->pb);
2278 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2279 int gobn = s->mb_y / s->gob_index;
2281 if (CONFIG_H263_ENCODER)
2282 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2283 bytestream_put_le32(&ptr, offset);
2284 bytestream_put_byte(&ptr, s->qscale);
2285 bytestream_put_byte(&ptr, gobn);
2286 bytestream_put_le16(&ptr, mba);
2287 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2288 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2289 /* 4MV not implemented */
2290 bytestream_put_byte(&ptr, 0); /* hmv2 */
2291 bytestream_put_byte(&ptr, 0); /* vmv2 */
/* NOTE(review): guard lines (e.g. the s->mb_info check and the startcode
 * branch structure) are truncated in this extract. */
/**
 * Decide whether a new H.263 MB-info record is due. Called per MB
 * (startcode=0) and at resync/startcode points (startcode=1): when enough
 * bits have been written since the last record, reserve 12 more side-data
 * bytes and have write_mb_info() fill them.
 */
2294 static void update_mb_info(MpegEncContext *s, int startcode)
2298 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2299 s->mb_info_size += 12;
2300 s->prev_mb_info = s->last_mb_info;
2303 s->prev_mb_info = put_bits_count(&s->pb)/8;
2304 /* This might have incremented mb_info_size above, and we return without
2305 * actually writing any info into that slot yet. But in that case,
2306 * this will be called again at the start of the after writing the
2307 * start code, actually writing the mb info. */
2311 s->last_mb_info = put_bits_count(&s->pb)/8;
2312 if (!s->mb_info_size)
2313 s->mb_info_size += 12;
2317 static int encode_thread(AVCodecContext *c, void *arg){
2318 MpegEncContext *s= *(void**)arg;
2319 int mb_x, mb_y, pdif = 0;
2320 int chr_h= 16>>s->chroma_y_shift;
2322 MpegEncContext best_s, backup_s;
2323 uint8_t bit_buf[2][MAX_MB_BYTES];
2324 uint8_t bit_buf2[2][MAX_MB_BYTES];
2325 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2326 PutBitContext pb[2], pb2[2], tex_pb[2];
2328 ff_check_alignment();
2331 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2332 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2333 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2336 s->last_bits= put_bits_count(&s->pb);
2347 /* init last dc values */
2348 /* note: quant matrix value (8) is implied here */
2349 s->last_dc[i] = 128 << s->intra_dc_precision;
2351 s->current_picture.f.error[i] = 0;
2354 memset(s->last_mv, 0, sizeof(s->last_mv));
2358 switch(s->codec_id){
2359 case AV_CODEC_ID_H263:
2360 case AV_CODEC_ID_H263P:
2361 case AV_CODEC_ID_FLV1:
2362 if (CONFIG_H263_ENCODER)
2363 s->gob_index = ff_h263_get_gob_height(s);
2365 case AV_CODEC_ID_MPEG4:
2366 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2367 ff_mpeg4_init_partitions(s);
2373 s->first_slice_line = 1;
2374 s->ptr_lastgob = s->pb.buf;
2375 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2379 ff_set_qscale(s, s->qscale);
2380 ff_init_block_index(s);
2382 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2383 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2384 int mb_type= s->mb_type[xy];
2389 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2390 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2393 if(s->data_partitioning){
2394 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2395 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2396 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2402 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2403 ff_update_block_index(s);
2405 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2406 ff_h261_reorder_mb_index(s);
2407 xy= s->mb_y*s->mb_stride + s->mb_x;
2408 mb_type= s->mb_type[xy];
2411 /* write gob / video packet header */
2413 int current_packet_size, is_gob_start;
2415 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2417 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2419 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2421 switch(s->codec_id){
2422 case AV_CODEC_ID_H263:
2423 case AV_CODEC_ID_H263P:
2424 if(!s->h263_slice_structured)
2425 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2427 case AV_CODEC_ID_MPEG2VIDEO:
2428 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2429 case AV_CODEC_ID_MPEG1VIDEO:
2430 if(s->mb_skip_run) is_gob_start=0;
2435 if(s->start_mb_y != mb_y || mb_x!=0){
2438 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2439 ff_mpeg4_init_partitions(s);
2443 assert((put_bits_count(&s->pb)&7) == 0);
2444 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2446 if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2447 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2448 int d = 100 / s->error_rate;
2450 current_packet_size=0;
2451 s->pb.buf_ptr= s->ptr_lastgob;
2452 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2456 if (s->avctx->rtp_callback){
2457 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2458 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2460 update_mb_info(s, 1);
2462 switch(s->codec_id){
2463 case AV_CODEC_ID_MPEG4:
2464 if (CONFIG_MPEG4_ENCODER) {
2465 ff_mpeg4_encode_video_packet_header(s);
2466 ff_mpeg4_clean_buffers(s);
2469 case AV_CODEC_ID_MPEG1VIDEO:
2470 case AV_CODEC_ID_MPEG2VIDEO:
2471 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2472 ff_mpeg1_encode_slice_header(s);
2473 ff_mpeg1_clean_buffers(s);
2476 case AV_CODEC_ID_H263:
2477 case AV_CODEC_ID_H263P:
2478 if (CONFIG_H263_ENCODER)
2479 ff_h263_encode_gob_header(s, mb_y);
2483 if(s->flags&CODEC_FLAG_PASS1){
2484 int bits= put_bits_count(&s->pb);
2485 s->misc_bits+= bits - s->last_bits;
2489 s->ptr_lastgob += current_packet_size;
2490 s->first_slice_line=1;
2491 s->resync_mb_x=mb_x;
2492 s->resync_mb_y=mb_y;
2496 if( (s->resync_mb_x == s->mb_x)
2497 && s->resync_mb_y+1 == s->mb_y){
2498 s->first_slice_line=0;
2502 s->dquant=0; //only for QP_RD
2504 update_mb_info(s, 0);
2506 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2508 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2510 copy_context_before_encode(&backup_s, s, -1);
2512 best_s.data_partitioning= s->data_partitioning;
2513 best_s.partitioned_frame= s->partitioned_frame;
2514 if(s->data_partitioning){
2515 backup_s.pb2= s->pb2;
2516 backup_s.tex_pb= s->tex_pb;
2519 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2520 s->mv_dir = MV_DIR_FORWARD;
2521 s->mv_type = MV_TYPE_16X16;
2523 s->mv[0][0][0] = s->p_mv_table[xy][0];
2524 s->mv[0][0][1] = s->p_mv_table[xy][1];
2525 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2526 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2528 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2529 s->mv_dir = MV_DIR_FORWARD;
2530 s->mv_type = MV_TYPE_FIELD;
2533 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2534 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2535 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2537 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2538 &dmin, &next_block, 0, 0);
2540 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2541 s->mv_dir = MV_DIR_FORWARD;
2542 s->mv_type = MV_TYPE_16X16;
2546 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2547 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2549 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2550 s->mv_dir = MV_DIR_FORWARD;
2551 s->mv_type = MV_TYPE_8X8;
2554 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2555 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2557 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2558 &dmin, &next_block, 0, 0);
2560 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2561 s->mv_dir = MV_DIR_FORWARD;
2562 s->mv_type = MV_TYPE_16X16;
2564 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2565 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2566 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2567 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2569 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2570 s->mv_dir = MV_DIR_BACKWARD;
2571 s->mv_type = MV_TYPE_16X16;
2573 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2574 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2575 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2576 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2578 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2579 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2580 s->mv_type = MV_TYPE_16X16;
2582 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2583 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2584 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2585 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2586 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2587 &dmin, &next_block, 0, 0);
2589 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2590 s->mv_dir = MV_DIR_FORWARD;
2591 s->mv_type = MV_TYPE_FIELD;
2594 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2595 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2596 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2598 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2599 &dmin, &next_block, 0, 0);
2601 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2602 s->mv_dir = MV_DIR_BACKWARD;
2603 s->mv_type = MV_TYPE_FIELD;
2606 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2607 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2608 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2610 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2611 &dmin, &next_block, 0, 0);
2613 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2614 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2615 s->mv_type = MV_TYPE_FIELD;
2617 for(dir=0; dir<2; dir++){
2619 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2620 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2621 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2624 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2625 &dmin, &next_block, 0, 0);
2627 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2629 s->mv_type = MV_TYPE_16X16;
2633 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2634 &dmin, &next_block, 0, 0);
2635 if(s->h263_pred || s->h263_aic){
2637 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2639 ff_clean_intra_table_entries(s); //old mode?
2643 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2644 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2645 const int last_qp= backup_s.qscale;
2648 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2649 static const int dquant_tab[4]={-1,1,-2,2};
2651 assert(backup_s.dquant == 0);
2654 s->mv_dir= best_s.mv_dir;
2655 s->mv_type = MV_TYPE_16X16;
2656 s->mb_intra= best_s.mb_intra;
2657 s->mv[0][0][0] = best_s.mv[0][0][0];
2658 s->mv[0][0][1] = best_s.mv[0][0][1];
2659 s->mv[1][0][0] = best_s.mv[1][0][0];
2660 s->mv[1][0][1] = best_s.mv[1][0][1];
2662 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2663 for(; qpi<4; qpi++){
2664 int dquant= dquant_tab[qpi];
2665 qp= last_qp + dquant;
2666 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2668 backup_s.dquant= dquant;
2669 if(s->mb_intra && s->dc_val[0]){
2671 dc[i]= s->dc_val[0][ s->block_index[i] ];
2672 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2676 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2677 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2678 if(best_s.qscale != qp){
2679 if(s->mb_intra && s->dc_val[0]){
2681 s->dc_val[0][ s->block_index[i] ]= dc[i];
2682 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2689 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2690 int mx= s->b_direct_mv_table[xy][0];
2691 int my= s->b_direct_mv_table[xy][1];
2693 backup_s.dquant = 0;
2694 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2696 ff_mpeg4_set_direct_mv(s, mx, my);
2697 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2698 &dmin, &next_block, mx, my);
2700 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2701 backup_s.dquant = 0;
2702 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2704 ff_mpeg4_set_direct_mv(s, 0, 0);
2705 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2706 &dmin, &next_block, 0, 0);
2708 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2711 coded |= s->block_last_index[i];
2714 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2715 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2716 mx=my=0; //FIXME find the one we actually used
2717 ff_mpeg4_set_direct_mv(s, mx, my);
2718 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2726 s->mv_dir= best_s.mv_dir;
2727 s->mv_type = best_s.mv_type;
2729 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2730 s->mv[0][0][1] = best_s.mv[0][0][1];
2731 s->mv[1][0][0] = best_s.mv[1][0][0];
2732 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2735 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2736 &dmin, &next_block, mx, my);
2741 s->current_picture.qscale_table[xy] = best_s.qscale;
2743 copy_context_after_encode(s, &best_s, -1);
2745 pb_bits_count= put_bits_count(&s->pb);
2746 flush_put_bits(&s->pb);
2747 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2750 if(s->data_partitioning){
2751 pb2_bits_count= put_bits_count(&s->pb2);
2752 flush_put_bits(&s->pb2);
2753 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2754 s->pb2= backup_s.pb2;
2756 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2757 flush_put_bits(&s->tex_pb);
2758 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2759 s->tex_pb= backup_s.tex_pb;
2761 s->last_bits= put_bits_count(&s->pb);
2763 if (CONFIG_H263_ENCODER &&
2764 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2765 ff_h263_update_motion_val(s);
2767 if(next_block==0){ //FIXME 16 vs linesize16
2768 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2769 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2770 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2773 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2774 ff_MPV_decode_mb(s, s->block);
2776 int motion_x = 0, motion_y = 0;
2777 s->mv_type=MV_TYPE_16X16;
2778 // only one MB-Type possible
2781 case CANDIDATE_MB_TYPE_INTRA:
2784 motion_x= s->mv[0][0][0] = 0;
2785 motion_y= s->mv[0][0][1] = 0;
2787 case CANDIDATE_MB_TYPE_INTER:
2788 s->mv_dir = MV_DIR_FORWARD;
2790 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2791 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2793 case CANDIDATE_MB_TYPE_INTER_I:
2794 s->mv_dir = MV_DIR_FORWARD;
2795 s->mv_type = MV_TYPE_FIELD;
2798 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2799 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2800 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2803 case CANDIDATE_MB_TYPE_INTER4V:
2804 s->mv_dir = MV_DIR_FORWARD;
2805 s->mv_type = MV_TYPE_8X8;
2808 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2809 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2812 case CANDIDATE_MB_TYPE_DIRECT:
2813 if (CONFIG_MPEG4_ENCODER) {
2814 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2816 motion_x=s->b_direct_mv_table[xy][0];
2817 motion_y=s->b_direct_mv_table[xy][1];
2818 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2821 case CANDIDATE_MB_TYPE_DIRECT0:
2822 if (CONFIG_MPEG4_ENCODER) {
2823 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2825 ff_mpeg4_set_direct_mv(s, 0, 0);
2828 case CANDIDATE_MB_TYPE_BIDIR:
2829 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2831 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2832 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2833 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2834 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2836 case CANDIDATE_MB_TYPE_BACKWARD:
2837 s->mv_dir = MV_DIR_BACKWARD;
2839 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2840 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2842 case CANDIDATE_MB_TYPE_FORWARD:
2843 s->mv_dir = MV_DIR_FORWARD;
2845 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2846 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2848 case CANDIDATE_MB_TYPE_FORWARD_I:
2849 s->mv_dir = MV_DIR_FORWARD;
2850 s->mv_type = MV_TYPE_FIELD;
2853 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2854 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2855 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2858 case CANDIDATE_MB_TYPE_BACKWARD_I:
2859 s->mv_dir = MV_DIR_BACKWARD;
2860 s->mv_type = MV_TYPE_FIELD;
2863 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2864 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2865 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2868 case CANDIDATE_MB_TYPE_BIDIR_I:
2869 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2870 s->mv_type = MV_TYPE_FIELD;
2872 for(dir=0; dir<2; dir++){
2874 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2875 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2876 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2881 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2884 encode_mb(s, motion_x, motion_y);
2886 // RAL: Update last macroblock type
2887 s->last_mv_dir = s->mv_dir;
2889 if (CONFIG_H263_ENCODER &&
2890 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2891 ff_h263_update_motion_val(s);
2893 ff_MPV_decode_mb(s, s->block);
2896 /* clean the MV table in IPS frames for direct mode in B frames */
2897 if(s->mb_intra /* && I,P,S_TYPE */){
2898 s->p_mv_table[xy][0]=0;
2899 s->p_mv_table[xy][1]=0;
2902 if(s->flags&CODEC_FLAG_PSNR){
2906 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2907 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2909 s->current_picture.f.error[0] += sse(
2910 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2911 s->dest[0], w, h, s->linesize);
2912 s->current_picture.f.error[1] += sse(
2913 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2914 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2915 s->current_picture.f.error[2] += sse(
2916 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2917 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2920 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2921 ff_h263_loop_filter(s);
2923 av_dlog(s->avctx, "MB %d %d bits\n",
2924 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2928 //not beautiful here but we must write it before flushing so it has to be here
2929 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2930 ff_msmpeg4_encode_ext_header(s);
2934 /* Send the last GOB if RTP */
2935 if (s->avctx->rtp_callback) {
2936 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2937 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2938 /* Call the RTP callback to send the last GOB */
2940 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
/* Accumulate a field from a slice-thread context (src) into the master
 * context (dst) and zero the source so the value is not counted twice.
 * NOTE(review): two statements without a do{}while(0) guard — only safe
 * when expanded as a full statement, which is how it is used below. */
2946 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-thread motion-estimation statistics back into the main
 * encoder context after the parallel ME pass. */
2947 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2948 MERGE(me.scene_change_score);
2949 MERGE(me.mc_mb_var_sum_temp);
2950 MERGE(me.mb_var_sum_temp);
/* Merge the statistics and bitstream output of a slice-thread context
 * (src) into the master context (dst) after the encode pass.
 * Counters are summed-and-cleared via MERGE(); finally the thread's
 * byte-aligned bitstream is appended to the master PutBitContext. */
2953 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2956 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2957 MERGE(dct_count[1]);
2966 MERGE(er.error_count);
2967 MERGE(padding_bug_score);
2968 MERGE(current_picture.f.error[0]);
2969 MERGE(current_picture.f.error[1]);
2970 MERGE(current_picture.f.error[2]);
/* Per-coefficient DCT noise-reduction accumulators (intra/inter). */
2972 if(dst->avctx->noise_reduction){
2973 for(i=0; i<64; i++){
2974 MERGE(dct_error_sum[0][i]);
2975 MERGE(dct_error_sum[1][i]);
/* Both streams must be byte aligned before concatenation. */
2979 assert(put_bits_count(&src->pb) % 8 ==0);
2980 assert(put_bits_count(&dst->pb) % 8 ==0);
2981 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2982 flush_put_bits(&dst->pb);
/* Choose the quantizer/lambda for the current picture.
 * Priority: a pending user-forced lambda (s->next_lambda), else the rate
 * controller (unless qscale is fixed). With dry_run set, state that would
 * be consumed (next_lambda) is left untouched so the call can be repeated.
 * Returns a negative value on rate-control failure (error path is in
 * elided lines); with adaptive quantization, per-MB qscale tables are
 * cleaned up per-codec and s->lambda is taken from the table instead. */
2985 static int estimate_qp(MpegEncContext *s, int dry_run){
2986 if (s->next_lambda){
2987 s->current_picture_ptr->f.quality =
2988 s->current_picture.f.quality = s->next_lambda;
2989 if(!dry_run) s->next_lambda= 0;
2990 } else if (!s->fixed_qscale) {
2991 s->current_picture_ptr->f.quality =
2992 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2993 if (s->current_picture.f.quality < 0)
/* Smooth per-MB qscales so codec-level deltas stay representable. */
2997 if(s->adaptive_quant){
2998 switch(s->codec_id){
2999 case AV_CODEC_ID_MPEG4:
3000 if (CONFIG_MPEG4_ENCODER)
3001 ff_clean_mpeg4_qscales(s);
3003 case AV_CODEC_ID_H263:
3004 case AV_CODEC_ID_H263P:
3005 case AV_CODEC_ID_FLV1:
3006 if (CONFIG_H263_ENCODER)
3007 ff_clean_h263_qscales(s);
3010 ff_init_qscale_tab(s);
3013 s->lambda= s->lambda_table[0];
/* Non-adaptive path: lambda comes straight from the picture quality. */
3016 s->lambda = s->current_picture.f.quality;
3021 /* must be called before writing the header */
/* Derive temporal distances from the current picture's PTS:
 *   s->time     current picture time in time_base.num units
 *   s->pp_time  distance between the two most recent non-B pictures
 *   s->pb_time  distance from the previous non-B picture to this B picture
 * B pictures only read pp_time/last_non_b_time; non-B pictures update them. */
3022 static void set_frame_distances(MpegEncContext * s){
3023 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3024 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3026 if(s->pict_type==AV_PICTURE_TYPE_B){
3027 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
/* A B picture must lie strictly between its two references. */
3028 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3030 s->pp_time= s->time - s->last_non_b_time;
3031 s->last_non_b_time= s->time;
3032 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing/lambda, run (threaded) motion
 * estimation, pick f_code/b_code, estimate the quantizer, write the
 * codec-specific picture header, then run the threaded encode pass and
 * merge the slice contexts back. Returns <0 on failure (error returns
 * are on elided lines). */
3036 static int encode_picture(MpegEncContext *s, int picture_number)
3040 int context_count = s->slice_context_count;
3042 s->picture_number = picture_number;
3044 /* Reset the average MB variance */
3045 s->me.mb_var_sum_temp =
3046 s->me.mc_mb_var_sum_temp = 0;
3048 /* we need to initialize some time vars before we can encode b-frames */
3049 // RAL: Condition added for MPEG1VIDEO
3050 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3051 set_frame_distances(s);
3052 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3053 ff_set_mpeg4_time(s);
3055 s->me.scene_change_score=0;
3057 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* Rounding control: intra frames reset it; P/S frames of codecs with
 * flip-flop rounding toggle it each frame to avoid drift. */
3059 if(s->pict_type==AV_PICTURE_TYPE_I){
3060 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3061 else s->no_rounding=0;
3062 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3063 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3064 s->no_rounding ^= 1;
/* Seed the ME lambda: 2nd pass reads it from stats, otherwise reuse the
 * last lambda used for this picture type. */
3067 if(s->flags & CODEC_FLAG_PASS2){
3068 if (estimate_qp(s,1) < 0)
3070 ff_get_2pass_fcode(s);
3071 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3072 if(s->pict_type==AV_PICTURE_TYPE_B)
3073 s->lambda= s->last_lambda_for[s->pict_type];
3075 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3079 s->mb_intra=0; //for the rate distortion & bit compare functions
/* Clone the master context into each slice-thread context. */
3080 for(i=1; i<context_count; i++){
3081 ret = ff_update_duplicate_context(s->thread_context[i], s);
3089 /* Estimate motion for every MB */
3090 if(s->pict_type != AV_PICTURE_TYPE_I){
3091 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3092 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3093 if (s->pict_type != AV_PICTURE_TYPE_B) {
3094 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3095 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3099 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3100 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
/* Intra picture: every MB is intra; still measure spatial complexity
 * when the rate controller needs it. */
3102 for(i=0; i<s->mb_stride*s->mb_height; i++)
3103 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3105 if(!s->fixed_qscale){
3106 /* finding spatial complexity for I-frame rate control */
3107 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3110 for(i=1; i<context_count; i++){
3111 merge_context_after_me(s, s->thread_context[i]);
3113 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3114 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* Scene-change detection: promote a P picture to intra-only. */
3117 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3118 s->pict_type= AV_PICTURE_TYPE_I;
3119 for(i=0; i<s->mb_stride*s->mb_height; i++)
3120 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3121 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3122 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S pictures: pick f_code from the MV distribution and clip/disable
 * vectors that the chosen range cannot represent. */
3126 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3127 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3129 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3131 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3132 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3133 s->f_code= FFMAX3(s->f_code, a, b);
3136 ff_fix_long_p_mvs(s);
3137 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3138 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3142 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3143 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B pictures: choose forward (f_code) and backward (b_code) ranges over
 * both uni- and bi-directional tables, then fix out-of-range vectors. */
3148 if(s->pict_type==AV_PICTURE_TYPE_B){
3151 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3152 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3153 s->f_code = FFMAX(a, b);
3155 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3156 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3157 s->b_code = FFMAX(a, b);
3159 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3160 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3161 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3162 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3163 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3165 for(dir=0; dir<2; dir++){
3168 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3169 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3170 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3171 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* Final (non-dry-run) quantizer estimate for this picture. */
3179 if (estimate_qp(s, 0) < 0)
3182 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3183 s->qscale= 3; //reduce clipping problems
3185 if (s->out_format == FMT_MJPEG) {
3186 /* for mjpeg, we do include qscale in the matrix */
3188 int j= s->dsp.idct_permutation[i];
3190 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3192 s->y_dc_scale_table=
3193 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3194 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3195 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3196 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3200 //FIXME var duplication
3201 s->current_picture_ptr->f.key_frame =
3202 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3203 s->current_picture_ptr->f.pict_type =
3204 s->current_picture.f.pict_type = s->pict_type;
3206 if (s->current_picture.f.key_frame)
3207 s->picture_in_gop_number=0;
/* Write the per-codec picture header; header_bits is measured below. */
3209 s->last_bits= put_bits_count(&s->pb);
3210 switch(s->out_format) {
3212 if (CONFIG_MJPEG_ENCODER)
3213 ff_mjpeg_encode_picture_header(s);
3216 if (CONFIG_H261_ENCODER)
3217 ff_h261_encode_picture_header(s, picture_number);
3220 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3221 ff_wmv2_encode_picture_header(s, picture_number);
3222 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3223 ff_msmpeg4_encode_picture_header(s, picture_number);
3224 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3225 ff_mpeg4_encode_picture_header(s, picture_number);
3226 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3227 ff_rv10_encode_picture_header(s, picture_number);
3228 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3229 ff_rv20_encode_picture_header(s, picture_number);
3230 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3231 ff_flv_encode_picture_header(s, picture_number);
3232 else if (CONFIG_H263_ENCODER)
3233 ff_h263_encode_picture_header(s, picture_number);
3236 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3237 ff_mpeg1_encode_picture_header(s, picture_number);
3242 bits= put_bits_count(&s->pb);
3243 s->header_bits= bits - s->last_bits;
/* Threaded encode pass, then merge slice-thread results/bitstreams. */
3245 for(i=1; i<context_count; i++){
3246 update_duplicate_context_after_me(s->thread_context[i], s);
3248 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3249 for(i=1; i<context_count; i++){
3250 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference implementation).
 * For each of the 64 coefficients it updates the running error sum for
 * the current block class (intra/inter) and shrinks the coefficient
 * magnitude toward zero by dct_offset, clamping at zero so the sign
 * never flips. The two symmetric branches handle positive and negative
 * levels (the surrounding if/else lines are elided in this view). */
3256 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3257 const int intra= s->mb_intra;
3260 s->dct_count[intra]++;
3262 for(i=0; i<64; i++){
3263 int level= block[i];
/* Positive level: accumulate, subtract offset, clamp below at 0. */
3267 s->dct_error_sum[intra][i] += level;
3268 level -= s->dct_offset[intra][i];
3269 if(level<0) level=0;
/* Negative level: mirror of the above, clamp above at 0. */
3271 s->dct_error_sum[intra][i] -= level;
3272 level += s->dct_offset[intra][i];
3273 if(level>0) level=0;
/* Rate-distortion-optimal ("trellis") quantization of one 8x8 block.
 * Performs the forward DCT, builds up to two candidate quantized levels
 * per coefficient, then runs a Viterbi-style search over (run, level)
 * pairs using the codec's VLC bit lengths and lambda to minimize
 * rate + lambda-weighted distortion. Writes the chosen levels back into
 * block[] in permuted scan order and returns the last nonzero index
 * (-1 if the block quantizes to all zero). *overflow is set when a
 * level exceeds s->max_qcoeff. */
3280 static int dct_quantize_trellis_c(MpegEncContext *s,
3281 int16_t *block, int n,
3282 int qscale, int *overflow){
3284 const uint8_t *scantable= s->intra_scantable.scantable;
3285 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3287 unsigned int threshold1, threshold2;
3299 int coeff_count[64];
3300 int qmul, qadd, start_i, last_non_zero, i, dc;
3301 const int esc_length= s->ac_esc_length;
3303 uint8_t * last_length;
/* lambda in the 6-bit fixed-point scale used by the score arithmetic. */
3304 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3306 s->dsp.fdct (block);
3308 if(s->dct_error_sum)
3309 s->denoise_dct(s, block);
3311 qadd= ((qscale-1)|1)*8;
3322 /* For AIC we skip quant/dequant of INTRADC */
3327 /* note: block[0] is assumed to be positive */
3328 block[0] = (block[0] + (q >> 1)) / q;
/* Intra path: intra matrix and intra AC VLC length tables. */
3331 qmat = s->q_intra_matrix[qscale];
3332 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3333 bias= 1<<(QMAT_SHIFT-1);
3334 length = s->intra_ac_vlc_length;
3335 last_length= s->intra_ac_vlc_last_length;
/* Inter path: inter matrix and inter AC VLC length tables. */
3339 qmat = s->q_inter_matrix[qscale];
3340 length = s->inter_ac_vlc_length;
3341 last_length= s->inter_ac_vlc_last_length;
3345 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3346 threshold2= (threshold1<<1);
/* Scan backwards to find the last coefficient above threshold. */
3348 for(i=63; i>=start_i; i--) {
3349 const int j = scantable[i];
3350 int level = block[j] * qmat[j];
3352 if(((unsigned)(level+threshold1))>threshold2){
/* Build candidate levels: for each significant coefficient keep the
 * rounded level and level-1 (coeff[0..1][i]); below-threshold
 * coefficients get a single +/-1 candidate. */
3358 for(i=start_i; i<=last_non_zero; i++) {
3359 const int j = scantable[i];
3360 int level = block[j] * qmat[j];
3362 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3363 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3364 if(((unsigned)(level+threshold1))>threshold2){
3366 level= (bias + level)>>QMAT_SHIFT;
3368 coeff[1][i]= level-1;
3369 // coeff[2][k]= level-2;
3371 level= (bias - level)>>QMAT_SHIFT;
3372 coeff[0][i]= -level;
3373 coeff[1][i]= -level+1;
3374 // coeff[2][k]= -level+2;
3376 coeff_count[i]= FFMIN(level, 2);
3377 assert(coeff_count[i]);
3380 coeff[0][i]= (level>>31)|1;
3385 *overflow= s->max_qcoeff < max; //overflow might have happened
/* Nothing survived the threshold: clear and bail out early. */
3387 if(last_non_zero < start_i){
3388 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3389 return last_non_zero;
/* Trellis search: survivor[] holds candidate predecessor positions,
 * score_tab[] the best accumulated cost up to each position. */
3392 score_tab[start_i]= 0;
3393 survivor[0]= start_i;
3396 for(i=start_i; i<=last_non_zero; i++){
3397 int level_index, j, zero_distortion;
3398 int dct_coeff= FFABS(block[ scantable[i] ]);
3399 int best_score=256*256*256*120;
/* ifast fdct outputs are scaled; undo via the inverse AAN table. */
3401 if (s->dsp.fdct == ff_fdct_ifast)
3402 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3403 zero_distortion= dct_coeff*dct_coeff;
3405 for(level_index=0; level_index < coeff_count[i]; level_index++){
3407 int level= coeff[level_index][i];
3408 const int alevel= FFABS(level);
/* Reconstruct the dequantized value the decoder would see, per the
 * container's dequant rule (H.263-style vs MPEG intra/inter). */
3413 if(s->out_format == FMT_H263){
3414 unquant_coeff= alevel*qmul + qadd;
3416 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3418 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3419 unquant_coeff = (unquant_coeff - 1) | 1;
3421 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3422 unquant_coeff = (unquant_coeff - 1) | 1;
/* Distortion relative to coding this coefficient as zero. */
3427 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* Levels within VLC range: try every survivor as run predecessor. */
3429 if((level&(~127)) == 0){
3430 for(j=survivor_count-1; j>=0; j--){
3431 int run= i - survivor[j];
3432 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3433 score += score_tab[i-run];
3435 if(score < best_score){
3438 level_tab[i+1]= level-64;
/* H.263-style: also track the best "last coefficient" code. */
3442 if(s->out_format == FMT_H263){
3443 for(j=survivor_count-1; j>=0; j--){
3444 int run= i - survivor[j];
3445 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3446 score += score_tab[i-run];
3447 if(score < last_score){
3450 last_level= level-64;
/* Out-of-range levels must use the fixed-cost escape code. */
3456 distortion += esc_length*lambda;
3457 for(j=survivor_count-1; j>=0; j--){
3458 int run= i - survivor[j];
3459 int score= distortion + score_tab[i-run];
3461 if(score < best_score){
3464 level_tab[i+1]= level-64;
3468 if(s->out_format == FMT_H263){
3469 for(j=survivor_count-1; j>=0; j--){
3470 int run= i - survivor[j];
3471 int score= distortion + score_tab[i-run];
3472 if(score < last_score){
3475 last_level= level-64;
3483 score_tab[i+1]= best_score;
/* Prune survivors that can no longer lead to the optimum. */
3485 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3486 if(last_non_zero <= 27){
3487 for(; survivor_count; survivor_count--){
3488 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3492 for(; survivor_count; survivor_count--){
3493 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3498 survivor[ survivor_count++ ]= i+1;
/* Non-H.263 formats: pick the best termination point afterwards. */
3501 if(s->out_format != FMT_H263){
3502 last_score= 256*256*256*120;
3503 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3504 int score= score_tab[i];
3505 if(i) score += lambda*2; //FIXME exacter?
3507 if(score < last_score){
3510 last_level= level_tab[i];
3511 last_run= run_tab[i];
3516 s->coded_score[n] = last_score;
3518 dc= FFABS(block[0]);
3519 last_non_zero= last_i - 1;
3520 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3522 if(last_non_zero < start_i)
3523 return last_non_zero;
/* Special case: only the first coefficient survives — decide between
 * dropping it (return -1) and the cheapest nonzero encoding. */
3525 if(last_non_zero == 0 && start_i == 0){
3527 int best_score= dc * dc;
3529 for(i=0; i<coeff_count[0]; i++){
3530 int level= coeff[i][0];
3531 int alevel= FFABS(level);
3532 int unquant_coeff, score, distortion;
3534 if(s->out_format == FMT_H263){
3535 unquant_coeff= (alevel*qmul + qadd)>>3;
3537 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3538 unquant_coeff = (unquant_coeff - 1) | 1;
3540 unquant_coeff = (unquant_coeff + 4) >> 3;
3541 unquant_coeff<<= 3 + 3;
3543 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3545 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3546 else score= distortion + esc_length*lambda;
3548 if(score < best_score){
3550 best_level= level - 64;
3553 block[0]= best_level;
3554 s->coded_score[n] = best_score - dc*dc;
3555 if(best_level == 0) return -1;
3556 else return last_non_zero;
/* Backtrack the winning path, writing levels in permuted scan order. */
3562 block[ perm_scantable[last_non_zero] ]= last_level;
3565 for(; i>start_i; i -= run_tab[i] + 1){
3566 block[ perm_scantable[i-1] ]= level_tab[i];
3569 return last_non_zero;
3572 //#define REFINE_STATS 1
/* 64 IDCT basis vectors (fixed point, scaled by 1<<BASIS_SHIFT), indexed
 * by the IDCT's coefficient permutation; filled once by build_basis(). */
3573 static int16_t basis[64][64];
/* Precompute the 2-D DCT-III (IDCT) basis functions used by the
 * dct_quantize_refine() coefficient-tweaking search. perm maps natural
 * coefficient order to the DSP's IDCT permutation. The sqrt(0.5)
 * factors are the DC normalization of the DCT. */
3575 static void build_basis(uint8_t *perm){
3582 double s= 0.25*(1<<BASIS_SHIFT);
3584 int perm_index= perm[index];
3585 if(i==0) s*= sqrt(0.5);
3586 if(j==0) s*= sqrt(0.5);
3587 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iteratively refine an already-quantized 8x8 block to minimise a
 * rate-distortion score: distortion is measured against the original
 * pixels (orig) via the precomputed DCT basis (try_8x8basis /
 * add_8x8basis), rate via the encoder's AC VLC length tables.
 * Each step tries +/-1 changes on every candidate coefficient and
 * applies the single best one; returns the new last_non_zero index.
 * NOTE(review): many interior lines (loop bodies, braces, the
 * surrounding while loop of the search) are elided in this excerpt;
 * comments below describe only what is visible. */
3594 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3595 int16_t *block, int16_t *weight, int16_t *orig,
3598 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3599 const uint8_t *scantable= s->intra_scantable.scantable;
3600 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3601 // unsigned int threshold1, threshold2;
3606 int qmul, qadd, start_i, last_non_zero, i, dc;
3608 uint8_t * last_length;
3610 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* REFINE_STATS counters — static, so NOT thread-safe; debug only */
3613 static int after_last=0;
3614 static int to_zero=0;
3615 static int from_zero=0;
3618 static int messed_sign=0;
/* lazy one-time init of the DCT basis table (basis[0][0]==0 => unbuilt) */
3621 if(basis[0][0] == 0)
3622 build_basis(s->dsp.idct_permutation);
3633 /* For AIC we skip quant/dequant of INTRADC */
3637 q <<= RECON_SHIFT-3;
3638 /* note: block[0] is assumed to be positive */
3640 // block[0] = (block[0] + (q >> 1)) / q;
3642 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3643 // bias= 1<<(QMAT_SHIFT-1);
/* select intra vs inter AC VLC bit-length tables for the rate term */
3644 length = s->intra_ac_vlc_length;
3645 last_length= s->intra_ac_vlc_last_length;
3649 length = s->inter_ac_vlc_length;
3650 last_length= s->inter_ac_vlc_last_length;
3652 last_non_zero = s->block_last_index[n];
/* build rem[] = reconstruction error in RECON_SHIFT fixed point */
3657 dc += (1<<(RECON_SHIFT-1));
3658 for(i=0; i<64; i++){
3659 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3662 STOP_TIMER("memset rem[]")}
/* derive per-coefficient noise-shaping weights from weight[] and qns */
3665 for(i=0; i<64; i++){
3670 w= FFABS(weight[i]) + qns*one;
3671 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3674 // w=weight[i] = (63*qns + (w/2)) / w;
3680 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* add the currently-coded coefficients into rem[] and record the RLE
 * runs so rate deltas can be computed incrementally */
3686 for(i=start_i; i<=last_non_zero; i++){
3687 int j= perm_scantable[i];
3688 const int level= block[j];
3692 if(level<0) coeff= qmul*level - qadd;
3693 else coeff= qmul*level + qadd;
3694 run_tab[rle_index++]=run;
3697 s->dsp.add_8x8basis(rem, basis[j], coeff);
3703 if(last_non_zero>0){
3704 STOP_TIMER("init rem[]")
/* --- iterative search: current best score with no change applied --- */
3711 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3714 int run2, best_unquant_change=0, analyze_gradient;
3718 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* gradient d1[] = weighted error, used to prune hopeless 0->+/-1 moves */
3720 if(analyze_gradient){
3724 for(i=0; i<64; i++){
3727 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3730 STOP_TIMER("rem*w*w")}
/* intra DC coefficient: try raising/lowering by 1 (distortion only) */
3740 const int level= block[0];
3741 int change, old_coeff;
3743 assert(s->mb_intra);
3747 for(change=-1; change<=1; change+=2){
3748 int new_level= level + change;
3749 int score, new_coeff;
3751 new_coeff= q*new_level;
3752 if(new_coeff >= 2048 || new_coeff < 0)
3755 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3756 if(score<best_score){
3759 best_change= change;
3760 best_unquant_change= new_coeff - old_coeff;
3767 run2= run_tab[rle_index++];
/* AC coefficients: try +/-1 on each position up to 64 */
3771 for(i=start_i; i<64; i++){
3772 int j= perm_scantable[i];
3773 const int level= block[j];
3774 int change, old_coeff;
/* below noise_shaping level 3 do not extend past last_non_zero+1 */
3776 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3780 if(level<0) old_coeff= qmul*level - qadd;
3781 else old_coeff= qmul*level + qadd;
3782 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3786 assert(run2>=0 || i >= last_non_zero );
3789 for(change=-1; change<=1; change+=2){
3790 int new_level= level + change;
3791 int score, new_coeff, unquant_change;
/* below noise_shaping level 2 only allow moves toward zero */
3794 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3798 if(new_level<0) new_coeff= qmul*new_level - qadd;
3799 else new_coeff= qmul*new_level + qadd;
3800 if(new_coeff >= 2048 || new_coeff <= -2048)
3802 //FIXME check for overflow
/* rate delta when neither old nor new level is zero: swap VLC length */
3805 if(level < 63 && level > -63){
3806 if(i < last_non_zero)
3807 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3808 - length[UNI_AC_ENC_INDEX(run, level+64)];
3810 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3811 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* 0 -> +/-1: a new coefficient appears, splitting the run */
3814 assert(FFABS(new_level)==1);
3816 if(analyze_gradient){
3817 int g= d1[ scantable[i] ];
/* skip if the gradient points the same way as the new level
 * (the change cannot reduce the weighted error) —
 * NOTE(review): sign convention inferred from XOR test; verify */
3818 if(g && (g^new_level) >= 0)
3822 if(i < last_non_zero){
3823 int next_i= i + run2 + 1;
3824 int next_level= block[ perm_scantable[next_i] ] + 64;
3826 if(next_level&(~127))
3829 if(next_i < last_non_zero)
3830 score += length[UNI_AC_ENC_INDEX(run, 65)]
3831 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3832 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3834 score += length[UNI_AC_ENC_INDEX(run, 65)]
3835 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3836 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3838 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3840 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3841 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* +/-1 -> 0: the coefficient vanishes, merging adjacent runs */
3847 assert(FFABS(level)==1);
3849 if(i < last_non_zero){
3850 int next_i= i + run2 + 1;
3851 int next_level= block[ perm_scantable[next_i] ] + 64;
3853 if(next_level&(~127))
3856 if(next_i < last_non_zero)
3857 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3858 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3859 - length[UNI_AC_ENC_INDEX(run, 65)];
3861 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3862 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3863 - length[UNI_AC_ENC_INDEX(run, 65)];
3865 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3867 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3868 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* total score = lambda-scaled rate delta + distortion delta */
3875 unquant_change= new_coeff - old_coeff;
3876 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3878 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3879 if(score<best_score){
3882 best_change= change;
3883 best_unquant_change= unquant_change;
3887 prev_level= level + 64;
3888 if(prev_level&(~127))
3897 STOP_TIMER("iterative step")}
/* apply the winning +/-1 change and update bookkeeping */
3901 int j= perm_scantable[ best_coeff ];
3903 block[j] += best_change;
3905 if(best_coeff > last_non_zero){
3906 last_non_zero= best_coeff;
3914 if(block[j] - best_change){
3915 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* if a coefficient became zero, shrink last_non_zero accordingly */
3927 for(; last_non_zero>=start_i; last_non_zero--){
3928 if(block[perm_scantable[last_non_zero]])
/* periodic REFINE_STATS dump (debug build only) */
3934 if(256*256*256*64 % count == 0){
3935 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* rebuild run_tab for the modified block */
3940 for(i=start_i; i<=last_non_zero; i++){
3941 int j= perm_scantable[i];
3942 const int level= block[j];
3945 run_tab[rle_index++]=run;
/* fold the accepted change into rem[] for the next iteration */
3952 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3958 if(last_non_zero>0){
3959 STOP_TIMER("iterative search")
3964 return last_non_zero;
/* Reference C implementation of forward DCT + quantization for one 8x8
 * block n of macroblock data.  Runs the fdct, optionally denoises,
 * quantizes with the per-qscale matrix and bias, and reports through
 * *overflow whether any coefficient may exceed s->max_qcoeff.
 * Returns the index of the last non-zero coefficient in scan order.
 * NOTE(review): several interior lines (intra/inter branch heads,
 * max tracking, loop bodies) are elided in this excerpt. */
3967 int ff_dct_quantize_c(MpegEncContext *s,
3968 int16_t *block, int n,
3969 int qscale, int *overflow)
3971 int i, j, level, last_non_zero, q, start_i;
3973 const uint8_t *scantable= s->intra_scantable.scantable;
3976 unsigned int threshold1, threshold2;
3978 s->dsp.fdct (block);
/* optional DCT-domain denoising when error statistics are collected */
3980 if(s->dct_error_sum)
3981 s->denoise_dct(s, block);
3991 /* For AIC we skip quant/dequant of INTRADC */
3994 /* note: block[0] is assumed to be positive */
3995 block[0] = (block[0] + (q >> 1)) / q;
/* pick quant matrix and rounding bias for intra vs inter blocks */
3998 qmat = s->q_intra_matrix[qscale];
3999 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4003 qmat = s->q_inter_matrix[qscale];
4004 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4006 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4007 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that quantizes nonzero */
4008 for(i=63;i>=start_i;i--) {
4010 level = block[j] * qmat[j];
/* unsigned trick: |level| > threshold1 in a single compare */
4012 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: actually quantize coefficients up to last_non_zero */
4019 for(i=start_i; i<=last_non_zero; i++) {
4021 level = block[j] * qmat[j];
4023 // if( bias+level >= (1<<QMAT_SHIFT)
4024 // || bias-level >= (1<<QMAT_SHIFT)){
4025 if(((unsigned)(level+threshold1))>threshold2){
4027 level= (bias + level)>>QMAT_SHIFT;
4030 level= (bias - level)>>QMAT_SHIFT;
4038 *overflow= s->max_qcoeff < max; //overflow might have happened
4040 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4041 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4042 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4044 return last_non_zero;
/* Shorthand macros for the AVOption tables below: OFFSET resolves a
 * field inside MpegEncContext, VE marks video-encoding options. */
4047 #define OFFSET(x) offsetof(MpegEncContext, x)
4048 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder.
 * NOTE(review): the { NULL } terminator is not visible in this excerpt. */
4049 static const AVOption h263_options[] = {
4050 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4051 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4052 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOptions API. */
4057 static const AVClass h263_class = {
4058 .class_name = "H.263 encoder",
4059 .item_name = av_default_item_name,
4060 .option = h263_options,
4061 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: all entry points are the shared
 * mpegvideo encode functions; only YUV420P input is accepted. */
4064 AVCodec ff_h263_encoder = {
4066 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4067 .type = AVMEDIA_TYPE_VIDEO,
4068 .id = AV_CODEC_ID_H263,
4069 .priv_data_size = sizeof(MpegEncContext),
4070 .init = ff_MPV_encode_init,
4071 .encode2 = ff_MPV_encode_picture,
4072 .close = ff_MPV_encode_end,
4073 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4074 .priv_class = &h263_class,
/* Private options of the H.263+ encoder (superset of plain H.263:
 * adds unlimited MVs and alternative inter VLC).
 * NOTE(review): the { NULL } terminator is not visible in this excerpt. */
4077 static const AVOption h263p_options[] = {
4078 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4079 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4080 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4081 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOptions API. */
4085 static const AVClass h263p_class = {
4086 .class_name = "H.263p encoder",
4087 .item_name = av_default_item_name,
4088 .option = h263p_options,
4089 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; unlike plain H.263 it
 * advertises slice-threading capability. */
4092 AVCodec ff_h263p_encoder = {
4094 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4095 .type = AVMEDIA_TYPE_VIDEO,
4096 .id = AV_CODEC_ID_H263P,
4097 .priv_data_size = sizeof(MpegEncContext),
4098 .init = ff_MPV_encode_init,
4099 .encode2 = ff_MPV_encode_picture,
4100 .close = ff_MPV_encode_end,
4101 .capabilities = CODEC_CAP_SLICE_THREADS,
4102 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4103 .priv_class = &h263p_class,
/* Generic mpegvideo AVClass (msmpeg4v2_class) with the common
 * ff_mpv_generic_options table. */
4106 FF_MPV_GENERIC_CLASS(msmpeg4v2)
/* MS-MPEG4 v2 encoder registration, sharing the mpegvideo entry points. */
4108 AVCodec ff_msmpeg4v2_encoder = {
4109 .name = "msmpeg4v2",
4110 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4111 .type = AVMEDIA_TYPE_VIDEO,
4112 .id = AV_CODEC_ID_MSMPEG4V2,
4113 .priv_data_size = sizeof(MpegEncContext),
4114 .init = ff_MPV_encode_init,
4115 .encode2 = ff_MPV_encode_picture,
4116 .close = ff_MPV_encode_end,
4117 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4118 .priv_class = &msmpeg4v2_class,
/* Generic mpegvideo AVClass (msmpeg4v3_class). */
4121 FF_MPV_GENERIC_CLASS(msmpeg4v3)
/* MS-MPEG4 v3 encoder registration, sharing the mpegvideo entry points.
 * NOTE(review): the .name field is not visible in this excerpt. */
4123 AVCodec ff_msmpeg4v3_encoder = {
4125 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4126 .type = AVMEDIA_TYPE_VIDEO,
4127 .id = AV_CODEC_ID_MSMPEG4V3,
4128 .priv_data_size = sizeof(MpegEncContext),
4129 .init = ff_MPV_encode_init,
4130 .encode2 = ff_MPV_encode_picture,
4131 .close = ff_MPV_encode_end,
4132 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4133 .priv_class = &msmpeg4v3_class,
/* Generic mpegvideo AVClass (wmv1_class) for the WMV1 encoder below. */
4136 FF_MPV_GENERIC_CLASS(wmv1)
4138 AVCodec ff_wmv1_encoder = {
4140 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4141 .type = AVMEDIA_TYPE_VIDEO,
4142 .id = AV_CODEC_ID_WMV1,
4143 .priv_data_size = sizeof(MpegEncContext),
4144 .init = ff_MPV_encode_init,
4145 .encode2 = ff_MPV_encode_picture,
4146 .close = ff_MPV_encode_end,
4147 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4148 .priv_class = &wmv1_class,